From cbe19272a73b993b67fc4e5e8f5940252357ce98 Mon Sep 17 00:00:00 2001 From: jenkins-x-bot Date: Fri, 14 Apr 2023 14:57:32 +0000 Subject: [PATCH] chore: release 1.11.12 --- changelog.md | 10 + charts/lighthouse/Chart.yaml | 4 +- charts/lighthouse/README.md | 308 ++++++++++++----------- charts/lighthouse/lighthouse-1.11.12.tgz | Bin 0 -> 15774 bytes charts/lighthouse/values.yaml | 151 +---------- 5 files changed, 170 insertions(+), 303 deletions(-) create mode 100644 changelog.md create mode 100644 charts/lighthouse/lighthouse-1.11.12.tgz diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..7e67cee46 --- /dev/null +++ b/changelog.md @@ -0,0 +1,10 @@ + +## Changes in version 1.11.12 + +### New Features + +* Add risk label (slimm609) + +### Bug Fixes + +* fix label name (slimm609) diff --git a/charts/lighthouse/Chart.yaml b/charts/lighthouse/Chart.yaml index edc18364e..f24a38847 100644 --- a/charts/lighthouse/Chart.yaml +++ b/charts/lighthouse/Chart.yaml @@ -3,6 +3,6 @@ description: | This chart bootstraps installation of [Lighthouse](https://github.com/jenkins-x/lighthouse). 
icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-website/master/images/logo/jenkinsx-icon-color.svg name: lighthouse -version: 0.1.0-SNAPSHOT +version: 1.11.12 home: https://github.com/jenkins-x/lighthouse - +appVersion: 1.11.12 diff --git a/charts/lighthouse/README.md b/charts/lighthouse/README.md index ed4700eef..48370b1c7 100644 --- a/charts/lighthouse/README.md +++ b/charts/lighthouse/README.md @@ -42,157 +42,161 @@ helm uninstall my-lighthouse --namespace lighthouse ## Values -| Key | Type | Description | Default | -| --------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `cluster.crds.create` | bool | Create custom resource definitions | `true` | -| `configMaps.config` | string | Raw `config.yaml` content | `nil` | -| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | -| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | -| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | -| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | -| `engines.jx` | bool | Enables the jx engine | `true` | -| `engines.tekton` | bool | Enables the tekton engine | `false` | -| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | -| `externalPlugins[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].namespace` | 
string | | `"jx"` | -| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | -| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | -| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | -| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | -| `foghorn.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | -| `foghorn.replicaCount` | int | Number of replicas | `1` | -| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | -| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | -| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | -| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | -| `gcJobs.backoffLimit` | int | Set the backoff limit for failed cronJobs | `6` | -| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy 
| `"Forbid"` | -| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | -| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | -| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | -| `gcJobs.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | -| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | -| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | -| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | -| `git.server` | string | Git server URL | `""` | -| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | -| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | -| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | -| `hmacToken` | string | Secret used for webhooks | `""` | -| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | -| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | -| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | -| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | -| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | -| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | -| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | -| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | -| `jenkinscontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| 
`jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | -| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | -| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | -| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | -| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | -| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | -| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | -| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `keeper.readinessProbe` | object | 
Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.replicaCount` | int | Number of replicas | `1` | -| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | -| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | -| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | -| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | -| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | -| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | -| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | -| `logFormat` | string | Log format either json or stackdriver | `"json"` | -| `logService` | string | The name of the service registered with logging | `""` | -| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | -| `oauthSecretName` | string | Existing Git token secret | `""` | -| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | -| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | -| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | -| `poller.datadog.enabled` | string | Enables datadog | `"true"` | -| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | -| `poller.env` | object | Lets you define poller specific environment variables | 
`{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | -| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | -| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | -| `poller.internalPort` | int | | `8888` | -| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | -| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | -| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.replicaCount` | int | Number of replicas | `1` | -| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | -| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | -| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | -| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | -| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | 
`[]` | -| `scope` | string | set scope to either `cluster` or `namespace` for permissions | `cluster` | -| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | -| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | -| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | -| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `tektoncontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | -| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace 
period for tekton controller pods | `180` | -| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | -| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | -| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | -| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | -| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | -| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | -| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | -| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | -| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | -| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | -| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.logLevel` | string | The logging level: trace, debug, info, warn, 
panic, fatal | `"info"` | -| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | -| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | -| `webhooks.podLabels` | object | | `{}` | -| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.replicaCount` | int | Number of replicas | `1` | -| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | -| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | -| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | -| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | -| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | -| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `cluster.crds.create` | bool | Create custom resource definitions | `true` | +| `configMaps.config` | string | Raw `config.yaml` content | `nil` | +| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | +| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | +| `configMaps.plugins` | string | Raw `plugins.yaml` 
content | `nil` | +| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | +| `engines.jx` | bool | Enables the jx engine | `true` | +| `engines.tekton` | bool | Enables the tekton engine | `false` | +| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | +| `externalPlugins[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | +| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | +| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | +| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | +| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | +| `foghorn.logLevel` | string | | `"info"` | +| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | +| `foghorn.replicaCount` | int | Number of replicas | `1` | +| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | +| `foghorn.resources.requests` | object | 
Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | +| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | +| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | +| `gcJobs.backoffLimit` | int | Drives the job's backoff limit | `6` | +| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | +| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | +| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | +| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | +| `gcJobs.logLevel` | string | | `"info"` | +| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | +| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | +| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | +| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | +| `git.server` | string | Git server URL | `""` | +| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | +| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | +| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | +| `hmacToken` | string | Secret used for webhooks | `""` | +| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | +| `hmacTokenVolumeMount` | object | Mount hmac token as a volume instead of using an environment variable Secret reference | `{"enabled":false}` | +| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | +| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | +| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | +| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | +| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | +| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | +| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | +| `jenkinscontroller.logLevel` | string | | `"info"` | +| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `jenkinscontroller.resources.limits` | object | Resource limits 
applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | +| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | +| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | +| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | +| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | +| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.logLevel` | string | | `"info"` | +| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | +| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | +| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| 
`keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.replicaCount` | int | Number of replicas | `1` | +| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | +| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | +| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | +| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | +| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | +| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | +| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | +| `logFormat` | string | Log format either json or stackdriver | `"json"` | +| `logService` | string | The name of the service registered with logging | `""` | +| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | +| `oauthSecretName` | string | Existing Git token secret | `""` | +| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | +| `oauthTokenVolumeMount` | object | Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) | `{"enabled":false}` | +| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | +| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | +| 
`poller.datadog.enabled` | string | Enables datadog | `"true"` | +| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | +| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | +| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | +| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | +| `poller.internalPort` | int | | `8888` | +| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.logLevel` | string | | `"info"` | +| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | +| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | +| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.replicaCount` | int | Number of replicas | `1` | +| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | +| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | +| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | +| `poller.terminationGracePeriodSeconds` | int | Termination grace period 
for poller pods | `30` | +| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | +| `scope` | string | limit permissions to namespace privileges | `"cluster"` | +| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | +| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | +| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | +| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `tektoncontroller.logLevel` | string | | `"info"` | +| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | +| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `tektoncontroller.service` | object | Service settings for the tekton controller | 
`{"annotations":{}}` | +| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | +| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | +| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | +| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | +| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | +| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | +| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | +| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | +| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | +| `webhooks.ingress.tls.enabled` | bool | Enable webhooks ingress tls | `false` | +| `webhooks.ingress.tls.secretName` | string | Specify webhooks ingress tls secretName | `""` | +| `webhooks.labels` | object | allow optional labels to 
be added to the webhook deployment | `{}` | +| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.logLevel` | string | | `"info"` | +| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | +| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | +| `webhooks.podLabels` | object | | `{}` | +| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.replicaCount` | int | Number of replicas | `1` | +| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | +| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | +| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | +| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | +| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | +| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | You can look directly at the [values.yaml](./values.yaml) file to look at the options and their default values. 
diff --git a/charts/lighthouse/lighthouse-1.11.12.tgz b/charts/lighthouse/lighthouse-1.11.12.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4117388ee762adc51693c2ec02931416cf665e6c GIT binary patch literal 15774 zcmZX*Wl$Ym)3&{FcXxM(;O_2hG(ZUM?y?E)?(PJ42o?zL?j9V1yYH9l{_6Sl&5x;? zshVE3rdD;I-F-A!G(0xozYEL&U^0=@U^kUh=27zI=l;&6&2Fa2ZLP1#&!eiX&7-Vi zXJg`E=B=gfC@g1gXAd~@e|BBxn>+rf?t#p2G>|{c6!4?TuRL75709E;qI|Go1)z3PZ%l>?0^= z{|*R`=_e1Q#3HAFL3DC2bII`)4W=7fzC{)M2dV6)btU;ob!C)CiT=He?nA zTVN82>j9%I?||qiSSdAu?k+!~K7tfFZ#>09$T^|z2}vN^98EVPTTO(XPn~%|46*Gj zFJ=|4{H_fT;|%*@-B3f?DR`tZ^p@X8E^`Kf-+T7l)7jS9WS9efUH8p??upws6|sW2 zLVn8dbZ|O`7>01H08&w{}B*?CFv+NG23 zEroa4Z^`=s-Ydk2fnqJ_5?2?yprhX>R|2%2$6`-R+Q5zh~eV7$l zT<%f?szZO97^dG}NmZ(c37irEP~o#{1=8@P6v*(ZLP~X|X|)368j-~W5A^fHL%^(r z$`%XC*|?Bw-@}AvbxpX<0bJ;qc?pk=G*SO*$tnNAw2PZSW{-!5?pX9H9UqO9Mu6sk zq8!)(h8sK^=qICBRoL$ZldCEU&%e?K;g<_6cWR;Vsx6$dP+`&H~0qgIYD9i)=*OK-U<*+xpgzoaDk2-e>hp`Cf9ea#k%HKp>At4Vqg+#njzE}} zThabDg^~Mx5RGj}Zqk)ANx1)xF3OKN1fN5jgDhG@1od58Tfn4cKok#u+R=#`K$#0Dh=4gxD<%$RBT*igLp&~|JP;h5J(yO3W z%q+o5YJz74p|v0C;=s^L{F13;qs5OJq>1@WNBg1MtNaviG`+~iIu621^hN54K^i%< z>t0C-eUM=6Hl#(356AxXUr*f@@w5{)YXCmKudLd zUa+KUy}E>Sy!xGN48zToTlkpCj@4fYG_%zPlU5^8yW%!B1g%wLsnN(AhXC$z*~xmlwtfI5+YvhYQdEyKv)+6O*gH z`Gsd@XbmT2MnqEmD%OwGqP+@5Ccv+@`9>G0qwO2h5oNrbO({Y5MEm6V|}Ai=iEr6}ECW(T~o z05qno0)cNqhZ%b?SyDmV@`-&k$@FQQ;bI4!kVIF=q;QRWm|wV8%wMNP#gBMfHmko4 zZT}{C^&9pinMJWDO3dn+lF!{u&eBXf!4T{5Z`vK_n&qE8Y`4$b*4L+zq=}Fp_{Dvd z3Z04%3E}bAgi^;picV506j0PdKa^{(VDT042a-2=O3{sh_AGrbA5N_s#LUb63CCfRFJ7GD~I)kHzo{gOwtBS>i^Yy-wszDPVtfnQH?*TL_sO3EE90JLKj5R`I-$9a7$a@Cu zY0KA-AEZ2ux!VPRt#ZY_--LqdCWUH_EySK2Wf8IRI~9vafH=B{0ikiDuQ=|)n%}2y zxY^XR**J8C|NUey;TP1?9m{F01 z;lhXoN3->Bl4H#0_z}=2mx8A8R&RGpTq7h|M<;qcZ6m+P5d=>6AU)IF4YJlSLsUGP zOMAd-TB=vK|Lkpr41NY)8^g%a&ewk1)vkX5_X9Z7s-tOE29>gq_Ki`+@ZrR9@;GMC znxMb>sSASiv@I4;kO62KW!4a5!L+f=fmZlwWwp=~E*;yh6^I>ekZ^k1C`kvYJ^aK9 
zxoNp+U)9cDBNRvq1@Ok8>0rO&mnor+IwQ0k|B1vMZ4O|?a)r<#&y=dxs`HCshuPZ#At$0;y%2n>%QiHwhcyKD0YfG0dyX8ftBio61Wp0 z-F6V)(ntc2rfu0vzK%He8bEs8SP^r>f4F&jFq>sV(s)r%Z>7?!QxBEU{LxUJ=|eI_ zm~&w&fwXkl3SAzdIM~X>8VRmfHTh zHa3Q#Q`pLE;!8NLZO&bdq+5Zh>OfOft!k~zt>-BztzR1xX&TS`ow+jwEjN-?cPfr2K7y>`%(*-%$^_L0FTh3RJH zW{^3GRJoTP>z*8Q2t{I6e*iJ`XFuOQhafeDtmjvK?-5ChXGy|MSITt4u^(fMMj3~v zMG`dlP%~4=q<>_k{Gq4JYl&|{Q;|B}T%vm*=bQ$&8CagO!uWN(W8FxiyCfD{6AgN@ zmS^~vZSo{Hy%6dOzn##0)#P68$R*>%iEe?dX6cW|u-wTT7(*3>NnER37h=x1%gw;0 z;4$GV)iy|Nnfpr!YT~tpqkN-HGsf z3#j_YN2n}a4=3j!Nr;Jmjaw?+SC5!Te5vC|5^>HshCqaeA+@+EFZha+7)^_ix>!5d z#-o3OPp2Q6MUy4lrjgcM6ne*C{VLQ|f^(onKkMg{Oek=a^!YZ0h8vri#abH4O0J03 zCedw4mxcwUD6u5fQ50K6u$*pqSf^ap@=|qcpSfz_(;b~t?6$#&HfC5UML|LnXg~(q zwMzX=xI#&Qs^2ulk(-W-i$|S`SK3!ju(85LN7Mu2u$g~=eZAcRX0grNZ3ydNR+RQH zRa8X2lKA_$`rLfXhe|OSVql8ng&IjY(Ra5}^?6xL4a+YT@sl%Nj25ipJTH*_SRiJt zUHcgjvO$tB!CLZ_rv2$yWyJI*^a1M#T1iTT!<3e96Ur8SjHCvhxrIW>yY5Su;*qo;NeiSb)yI*iErQ${z6g(B zHGDC^ZyLURvD(Zn+3TfJQgT}m6{7MB44N?xf1sVC3}Mkd4RCyGQWh>vBoiJ^lQ+<% zBHbtZh=Pt8DmIIK41MpUaL#7!YJBdFEAOnzK8Qnq<5%0r7LW-TUqt;8|GjY%&jR?o z>mpfT)ZnpV&pXZ+ovvaZX{Nl?*8|A;B_|T9@DSj+pGErQmdSm2cIBu62Sg<8XZi;z zCiS(zK#GHf={u%-5~Xhg`g$>uF-};?V~k!(+iH1q{pY zmAx^0OzT@+nwf0=_JZYa4MC$3X@ z=`DYxO5?~q@%!^-A2EiIh~0#Eu~)K*=WzOxU2R2>F3eli zN@xh{%66jY^}Y~k>=lWXAOXR58NsNEPunMfF6=Z4wO%{%_^@8otxD`sW~G8PAPN}6 z!z0LWD?o!whgx+J=h)wHxBjv$n-KM#3j9)y*c7`yb||*6$3}u}KLI@NP$%a7{q!{4 zNqkLn)2!IUW{YB(F9eU@Co^U<4b-b%4Gru|+z1CryQaCmQ@Ku7&}x(Ul?)ILF3H&$ z5m%P+T#G4>awF9y8j->0 zXEUU&0`c9PAKXd6kNGtM&&I58SGVJ!*Z1zp4-kv|#IFn0nq>B0%N5v1@2v6NFx(}ef1ittmDO-gci2`ST*V6s!lU?` z`Q;|hkWq}SW^jKx{kx_1uOSI(CuM6#W%O{;eWw`($-yy4y*FO-B4X*_OkZ*D3)+mV zRycP2jW$P>2buXK#?KN!IcsWiixjT3zO%2v9F-y@vh*>)&z?XV;Q-PY0#z8>r9Rui z3@WVJ2|AT=q#jGXFHuNNE{+i4zDZa3#!Jo2iSIkHcbwK%Sr(`uc!SP7-r0+aJ7@zv?5`Cwu_xpJPOk<{(%4mW#i@P{>AYZBm69SYLBj92W%6?_ z!BljpjuI|Jg!~h7ida(q@)sFiLtX#F zOU6*eXw2!P5M(!cjzLGgk$49TIF6Iw0fM?|?uPypT8%Qd>LhVVjhLdgJM$&pu0H*w zHDcv04AALHW6QQiyf@_>&Mk3AVjRAi9yTHX6I!RerQ?qa9;&ZH@||9Q*EUy2$3g4w 
zTLF$#;2Nq)7CwTZg2C(Sl9Gy|kUeka@N&O`E4_o`hTJ#*%npU2NDIVsxO{(vlHaPnOr=X)fNg>w zfP%RXxB(h66O{X?oJgQFtneBRbefh(h^!(V!oI`72AL7@7rlOtHRX~np%rv1@d?J2 zG?1bhaK3w~?-X%RL*;Rea)!=61O8@38m^X!@SkS?lxm4@#Gp3YkAp9yLW~T{@R`Dh z3c0#2N$BQqTwJcl2JAV6+8{=Sen^}>TDI&JLtol+H zw|7W3j-|ZxYt$Hn{oVa;9!}Gfo8sZ{S|%tu7i)uNiSxUJJB5sK_)wIcY9GU8Q3`0L zc8zP`c2nhQNPk&F}f*;K5tfTJo zM1y1FDP99FlI;p&`>X{CsIbFCAVKjCGw--hzF$Hi@ZapLJj)(1?vC;+m7Ikv?4IH? zoJuIPZG4lyhe4_8MKBDWOzvlVv?7>iIiqQ7RP%oApF5CK=1Z&ElnN99{^kJw)2RhW z&j=y}`_>F7Wlp;MOU%_<)>tdZh}|OA?XreZd7j*+fk)q zx8Dj3BkiJ@XAM4rI*}Ca>0>+zk>PVm3Ci(h)z`?EMHW|N1{^nd@?7^Iw_li02|n+j z(GOEgQ*c9GiJQMAwHSEd#w9g}0ft1PjY?Ov@JZC=#lyi!k#4{m1W`jy12NZ0f&aHH zWndPWm<7OjO1Ss!`U=UVy~~2nGqFzFgd6s5MYt*Ni|3{fE4cC=KDm8`9?f+RJsJ`V<3M@$b4IF?WEKY}6 z_Ah?Luo9g`rUrfIfNU?w&+2DTAJLNs#);+uKKjZHv5e@~0aB5f8sN#y$5m&0pdifgc{K zCf|%@Im=(vA^@A#pa?!Chu=)0)dPpUG=)T( zQ4)_^e93H2p{bk0aBjn9K^?{ym=3^^@4Te5*u0G-qX#URarn9x_zC;pv{vJfTUm7zsqb^< z)2MZvATNoJtG;_j(OzBjwxu$!6a}s&Yp9vX7F}r(r$NqS#AYz8W17pn$a6NIlVO+^ z881*IhL;)c6=NOmSaL<5UcAA;jf|XyGCvpXMS+8?ZNAho>?hn%FVlTV{V|$cQtVDq zIYocRACmlgD&KJc`_|WgU950OSz_EG!N%^F-DF;9fOVf*m8-2)N6=E^3(-?$d=wC2 zHiqwRoBSAtPs)1H!?!!HBwm_ci3%Y4An?*%mU*+?aR*+fdEb7xkO8-)-8sF0IW_g# zUwUG`bDV<+rc6D@*XBMO7p(6fd2rLCjpKjEic}2jnE6C>50-iFkyquI+y~pvgYou2 z7mk;9cOYANrWr6ew6y$ERPPAv1!>qaS8M)p>G&9Z3-a6px92Sk1nCU!H>}NR!)>d3 zTL>l`o%!Sk{rfe-DTD1=Avkp)|5Z_n$GvGs9vU2x=)wzE08?@lOD)LF=8#T=;|J6K zZiwAc*75K9%&6wJhZxa;LFX##xo7zj=e?)xf6*mPi)_Fghjh2;yHMSzjvp<8dENEL zca^`~IEG8YU9dv*q^mRuQfzor7dDsfU_^=eFJu${@V5e&!dQ1v$TN@p^{~hOi0Tq` zu}|++)~@r$LmKliQK~{4Pwgph?bLz^dv0iDe}qCJu*F-GM^URciM;}pOai;_Xj8|V z&;U(e`azRX9~IkQ^GK$}M$mI()%O!j#E+z_-rLW;N`?N$I0Xh=s8A^5HaTp8=qXdU zvgiH9-rm$*1muzhgN+|;NFG6Eyelqke(|UcdKd05zYiOEt|jwvX$Fl;hfN3 zrS^b>A;>1)K%`Za3Fv44tC68w5Q1jl+ATPD_wDl0$o>qplysO0w#|C_$AQbP(jV)3 z%bkI)3-5(qt&Q~qmIts6x4l-fSW@-|BG3#Jcy{bGP6K=JJQ&-`D?fuZa3MtlbMA`E z{xj?PpV_UdqPMor?nLmXSwV32KewKrMXw)dPv**%Do;%DOj*RQqWWOod~l;X?stIu zEqJkN?j7y5JNCnp#rp3vTX;W~;3L~$3DchH5C?NdSK8F1GD&+Ocni3SQbEIyqoXAG 
z3NuE6pe|o+zfi);I-n$NE=U z3;Nj!F6?=)v;se#E?x|Mc(PdIED4tq-7TBVX$}eAKEUN}x;#UbCr}yZcUD;Y$pity zKg^5RAOf=-lZu-Wx&-4Z5@=plv&Yd@C{TCU#gK?FSJ#@yeB^GOB%*;h`NF&9>e@*8 zL`F1;2IP$7+1gA-H|}q;!!%fv$^t>YD80ASpw`s6N@4b*y4JR}EUq@V(ZQMR0FVe( zhMzgfmg9rc^#(q9S)WGl`lst z+G#9XJomr!P+0zXb+*`wYkKK)CftZ=y6fg^avZGq6==S6&xGbergCo2S9+?o)jhW& z>HoDF^TQJE1VRuYxP|i(_HUMSPg}NzgHY&S=TF$}?@I zQOn=6C^opvTii_G7RCHiQbOoOym#QbwoP`EozPpsWAzui-c>E`P7|8)IVB!5G!MxL znfezbiKO?F!+-?36pIs(4)%v_;*;^aPWEknOG$Xs1&eK*w!@sQI)Ke=*dXk7WwWvM zi|a=n?VC=`dmYV3|JDQT$E*Gvl-M&PLW`G8CD&WASpB}{?Yu5CtZ zW!+dA@O??9<&=WMrH1d~@HlK-*#sw+QF2&vo>8BQxbU+*@}80!dKQ*?le#^_*R0LR zt=hUUUhR~V3(N#A$+@Und)4QdIn;Qko;S2$D0ob}r^+KVEYzrxHB~-z)MR~31o^}+ z>fXNP4}$-18}BG!V@3TR?w7{8J})2h6VE*%llhr8=irx4>DTrVuWLgC@IuD?Dl@r8Rp3B%9U(>sQ~pDC8t8TvA>|wj z*)~M_Wm^E;lr9;yqb%h#lDVgGX>tbkRQ%X}@7{W__^6}8v8b-9))8dA*Of(RKkO0- z!64oRxW?!n?c8-+kW}BIa?XTd;1&yO)y=&Zi3CCB8aoB*9Zxf+0B6NYM-laNmJQ_~3v^nhQfVG>Atef9>**-Vw`P4u0lE@C~zzJ4E2WL-1CBHMpl|d3^%xPA%Lt3Ev}a`qSJGa4WmJ-2+!pt(pq= z&4T|*tNVRyQ~+E|{!j|uHe-4GFXX0Nfc;zaKJ5<_oP8_^{3m=DVEtmm4A6e?@TH8O z%Hl&{LK1VL27g0hhkU|!af%Lh)U?^(Cc7fJ+l%t%F)TD^M$x(Fo!Ps{(2_&|u>^f_*3uSV@OM?4+ZHZ`2Y{V+#k^W4&n<6-I z&W*P&HqsW&BI5c)&YddYDHCa{q%PP_-qtsEbOfIRKtN|!B~Rre zU;GHr9!_@2$#TT+53uTTqTtOF@&{J<5EhejlF9E7#5W>MSmvuv$O=hZbQ5K)z|5De za2ZQeckz<0W(?6HAcKb|CLQYKG0ImS?`g_TC+#m@{j1UX?zJpeRAq7dKcGXu-dx@- zf;28OzX33NPUIhw%|$O`h&@+>X&LrZ^;Bu|Z{9QS(u<$<6-4M=sx2|QR@i)XLSCS* zfdRjye<;#2K;`~qGOyEkRK8+ME$mbAsrlDq_+jud1;;g+27VD-mwtEr45-R)J@+L+ z1BE_Y%^pUf!CrTAo+2sNQ*P+-Nmgsy!NI?3l(PjR{>=eVcN6brB4ki^aS|-~BV?`d zpTkghE5%}iy*>jHT_FYBJ^5Zu`9pJm|)`Aqig&0*-VrcHgQd* z-27a6?Q;9g1`Fm9pFno8XKkFvb#AOH&c}`XaLu}^hzV5HeX{DW^~$&Xnk!uQ!&XRx zmGpJQ^S^KVc>n6w(hL*xyGYg=?K`iNx1UDAI{`PLw5*1*{8WnbDqfG*J5C=)Behj2 zyd_f?!N`M^f)pSqOKCE*W^H$I?NB=W3i2%HrUhD!KXZwQXC_cu#nV_k~g1 z0a6=}3DgogPr)enV00}k5D$~LI;v%Y79hb@nFS-JYd_8V?{U?Qt(|xpF&SO2Hrl{C zF#M;J{l*xikko)4{t|*A*E8dhndjO zP51%&^kPy3Q}KPd7E|E+0nqXJMarJ&nZwzC;wb}+#($kJ7Z^ciuFb33bO`Qv1Go8> 
zKKFFDgH44i{=OMAiuJ~K`UHSKoeHp-==LSQROye+A^2iT<@)1u!OskH{9hqwze!*#_g4Evg}CtZJN?7h}#Dog4_kD&hoYWwUZO zd}YMGA}2yaD9OW`-#OBQ5}ow+_7Fl+nPZp?G^93M83?lq;u~bmHkUEmr~}sjuyS$Y z8=f@@?(aW{N3+b&*8iKHwdNBhFg$8D-!y7bo_s5A^KX>gZCP=lx%YV4xa~4C#`A+V6wYH)z4)1GDmEq;aLfQ z)A;yxF)3&jIj66wwIB*U0rVyaMwG|w zJVI(&gwauP1a;uXUq}_(oW?ZnG?zk~_LV+Z!>S@WGKf_%2lB}_aomBY*S{dp{Xznm z%^h3I_hX*z6y%%z#Ad62@+qy4L`B0&?o7+dZb6x9GyhY{8gI*+pZ$RQlJ+UkAqsl; zmTP{JunmHLlh}eZOG4r9DsYu)PnoRpDpO<6+NvR$em2Zy4H+U=$9DazLGzlnIEzc#v!zuFhv1AYZ#B74<3rw`JS!3FP{PjnBhs? zZ~ksU!ZVRoTxv%(Sq$GO8^~Z$*=5{E`Zk1|Z*T>tASpxn7&eGS0;#qH74CrU3*VLS z0KapYyU*Trt2#1Mfhs!ZW+sUaD zFEb$MFjt}XJ9A6s8C~AOdj61xD>X-yPXlK{|AuIoW^C1kC(^H3ZKSw8#_tRY<_qsd z1*{s589mD^{e&9Da{YyG1j{6cXI7fXmB2nfe+I(P4+aC~) z3tEbxz73(&%O%*EhnLh~4|2B{7*`YwSekVH_Wb0PDW;F*Y>%;a3Uy+>=L;;nV!{F>qg+j0oPKCy_!B()k_$E<> zLia^htPn;Lu}1Kx+HJi=Gz_j!!i+?0m;yif6cikOmZ{Ui>;t|@SXO+9z^M+No!lRX z3xtH53VpP|xxcn$&EEB5G7M3R;hbZPzq7u{#Md0wDK$-loQ^dIa7CYi6^C!0mdWd1KY6KO3ARhvFxj2U=(vYBeO6DI)F1xI+;7q@vtNVF&w(V5o4X1sUD%ukE zi)JNPX3ti;v-L6xko1;DG6jyAjr>N8Xny=pw(`lLq#<@I%I&Vap-~*O&Ng-8L0nA_ z+_iafno`q2&!SXdbrX1h@NAde^n@LwRgPH;U*P$foL#4byH#EveAxtI)t3#g>?c3 zzGQ>zXd$g~-*1351N}--Xw|}Zy5G~SGkCk5+dcUX{wF^j_**3lLM8Cj-$_u0 z0)L3_%IP>5zJvFZ@SXR8WgW_Zz?Dr}gy4}82fPI$r;g*JzO#s_1NieOz4^?#!V8yu zU?uhy!;p-mus^$mHz|JyLEUG8pKUZa>J*v-lx2(}@?MJ5W&y{kx+(s`pZCbbNuwbl zJ8k{0J0@fLBxJX>Z&f3X?qT3U!)IP^j)%@&)tiwr6JDq)O10 z(dPlVOQ*o;xJ@6G?@B1a0`~i}`bZ}Fja-$xzrS#Z-A0n}02APr=F7!hY;lyW!7wVV zAq+iK!J^Xg@^TPB8P!XiK7uM=0T@lI+)xVs<;J!3PDCCd7oNA{Re4ufjnbF zaLCg?`S-+Q?|1WGW-H2;77tio%lmrsbrLKWC=oPuF$WOn zTIofjKK+~2PY0}_K{?xRIscsFX_lq-}63CGl=-x`@<~jZ*DnP z3=1fD`l9?sY()>O+fn-s7+dE8!bW!&x(%xlRobeUs>=l>6F$eYe* z?bs(tpAy!VwneYReA=NvtqlA+wyFffOo{g1tK$^MH(WR^HjzLVLYT^3W@BeXu9AZt z8?-W9lv)e6!#X7k*&>LN^PO>UYzc*@3h-}2SF^)3rJOM$RE4~-6} zt~;DnDIq+0f1&;#tc|5U%}}EIBQf{!|HtPx?aghr+SE^XZy(6;E*C*6tltw8`>cfK zs4dm9D$kN}l#K`~PKghU6A}BEC+KH$vem;CupLmF|1!R)47{(B<6xe0oMj)1BaG$R zsji)^3>2m>jm>hDdgCZGu}RJ&>dr^QRCM#i+4@xpFcYjh5P)y$&g%|Z)#sH0Ry)f~tP2P<^ 
zM6R=W*N57$)M?v{QRYmi1pi_iN}f~rN8ng zXOQGdWDs`f8}s=6dG<+Z@t^gIgBfF-OyU=hmw|8>TN;txgTknO42DCVZjKwM2drzg zN};cRy>LKS^|$eAT!E7h0ZE57-#3-}wj=u9HS~l_1tIdjO}X4F9}22%+aTC> zs&9}szjkkxbT8%=AQV2Hi~IUIf~g$RD^%E#9)fgnF7|CT&+2YRUf1hbMl5}<%DovWli7ADP;_1m zJ@u#4K81UKnb4feqlQcMlcmnblMv|>B(IO7&kS7W@EG0^Sl|3m2wV2K6_oNZIF&zs ze_!5%bQ8Y6xk9T*n7O2k$`A_Mle&dD>my(HOLsyf?!Hh#dn@_Cab^IWsS|%8ywH1y zqUcEuT>cT~;_emA&nNl@PpMG0B8lDgOWUXDAB!I4|IK13%q>2)LI6Wa8UHL|M6yoD zne!J*Z5=H&!uUi#cY4vU;0JP$6(=B_7};;Q8Wg&`jZ+dJ5EH1$!HsmW4$Uvj+~k$; z%gK6Xj5e^`Sncu8?r~@ai}Eu~0J+_xCVyz)FKBoDK2|rUirm6G9)%Z^PP=nFKZc6r zh=fAx-ND`B;uyd(MiPR=FhNFxf+i-1Efa-}Q9{4sP=$kpB1Oo*-Tw*|@e#Ah5r~Y; zK{j-OvfGOycZSp2#v{A4k;J-*^AjlakhcpOUtcf5U?8Mu2qJHJ z_$SZ`=MqCb-kp1XfJZl2Mt{SxcV{$Z{%6`;y-YWP_{}suDwUr^^jrw_RpDl5PSdQ(5t(+38V^|IgPve8>R5=z4mGD13RSe=ffXUiUc&A+Yyh}DDoV|K z5Qo&X7Z8&GnD@|C>-~`%nO$t&Abq%Nb<#Kgeukq^jR-I|V@iOj9LY9jmebNf zYwl5$0m1&RMRbKn+D>`C4i#hMPb44u>WEmwke-l3*?>9zIU<7)t_wd( z52s$)#{FmmnTnIPiqr!Avv+z_y#EsdNp2WB$Xhw|Ds0b>zs;-)B>=nP{7hRlo)%lN zDi@B<8hqHe$DZ8qPe+*J`5H=vgo7JLBa1hB#cG-!fzsLXuruwrIHKKfQs?l)?$zo7l`<-9_H+;Foz3fkPbxcmrm(X6aLu?@bC zLw3TPnAP>z!$Z+@MMqNO{EALeH$tENb(ADB#w^W1iadV*u;cDnWxrP323Kd?6VSbQ z_=sVHwssOLN25>@y+eV(-?7Ij0CEr*=dC~p->FZ50?Kr;FY>w^5|s&&VccWzrF$by ze8ag>D}46!fx(VveZq!UD$VU(m<(vlGq-d;avYA(ziXchIr!fk(^~m_GOdjKfM>sx zLZNaH{n2asQxP{^0#wK|zWP;YB04&w!0LZ2m1xn~vBeqx(Vsw4G1|~&Wo_=rF^re|#UBl3kz#NAr?dUN z)^ksPR6j^<>yM?m(a0)=eM^=5NXF>6d~?WCBV@nbyLp%k_vyGaLfxFVjH;m_9?&k2 z`)}>{ATkQi;Wa{0$J0u~wsqA6PjzGAK;T(}a_poiHdxeo@z-Y-v+E z)+X>fqHo4%zRYH5y@G^&u0@_rNrL62}Sa*i-7ZDUx9|JcT!}SjdudbLsI;<=sJ!f57muiR5eWX`af{jZx0PmY-1U*t2y0N-7T|dwVx$88oEK3<>*2 z5;Qr+^gfMsWr4cN^4E53@qcg5@VmrFdNd)yETUa-wi}91T+p15N`Em+V-6JIDR7fxI#3*6<2FA_~5_| z+O51WS4N%sOkB-i6Z8hB0b?eno?eGM=99UXL`@M;Z3Ye{SKC3}Q+Z*U!S;S~IhdNDIs zy_2WRXbyjsBxGZ~Ve7CzFTQIeZKAk+u?un2!)fT(s`P?1R+|yMmB}cV?Sd(xpLL!h z^!{6oUvD9qzFHV1nq~fGe9^7V#)(#BHC?zKg}x-B1PIp+1fd*Slff`8(_Pd_06!G_J^zJVk8=?Pp7Gou ziX&YNdJv>OSvs&rCB9|ZGukQU*&8$f9kXV!E*X`%?EVtEzWo(rmxmp}R*%xWX5n|h 
z`!|idW6Ip}XD*OjuZLn-KdS_u6SI*;ERhT@K>5Kec0_nzu?pq#&((zN2vCK-ad-vxk-)3R?Hwf zm@AM{ByXuX+gV?rB~tC@ps1-nvi0eaEBxg!x+39G+jnNt_^zg!x2eB48NO}n{FhdF z8lRur?e^4AWnP0!LYl@eV_r$M)OIA>RaJ0@c&$t56jOHi!_)|^mUYtqXl?Qq$dtS49`im!F-DIHj=8rBK2U)6qOZy>3FU02$ zUZ3**D~U;?TeCm(&vJ)aDoJq|`G+8#0Yy#(rF;!mI4?yzN-l=Xk7300UL^D^Nlgha zn{8SI02_4M5Jf*JBzqU$jZp~waD;s%3zD`$=KS6HIPq7Nz&dQEK+TMO*lri2j&C0dyA%wZF>rRH+`N0IV0o%w%_P zXOBG~vub@G_iHRNw=E~s7^qBB+;AdHk^#J88I5>C9N`00qJuG0`nhpfwzmTD$BQ0H zq*E$pbfIY8Rxkak^a<4e>+1d*r1%E%^u+?ZvQ$8Tbvz41LZx>Yrdih;lMFP<^fLUr zt%NbpG zP8!6-CiCLaRtc2Dv7_Igl#9LwKeIxAe(Rz5*3;J14mM%2Cb`+ZiSE_RG~BL8Xp_%w Xmd`em2aA4=VuMH3$twUa5P<&&9MPmc literal 0 HcmV?d00001 diff --git a/charts/lighthouse/values.yaml b/charts/lighthouse/values.yaml index 54f43949c..bfb7e0061 100644 --- a/charts/lighthouse/values.yaml +++ b/charts/lighthouse/values.yaml @@ -1,80 +1,57 @@ git: # git.kind -- Git SCM provider (`github`, `gitlab`, `stash`) kind: github - # git.server -- Git server URL server: "" - # lighthouseJobNamespace -- Namespace where `LighthouseJob`s and `Pod`s are created # @default -- Deployment namespace lighthouseJobNamespace: "" - githubApp: # githubApp.enabled -- Enables GitHub app authentication enabled: false - # githubApp.username -- GitHub app user name - username: "jenkins-x[bot]" - + username: "jenkins-x[bot]" # user -- Git user name (used when GitHub app authentication is not enabled) user: "" - # oauthToken -- Git token (used when GitHub app authentication is not enabled) oauthToken: "" - # oauthSecretName -- Existing Git token secret oauthSecretName: "" - # oauthTokenVolumeMount -- Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) oauthTokenVolumeMount: enabled: false - # hmacToken -- Secret used for webhooks hmacToken: "" - # hmacSecretName -- Existing hmac secret to use for webhooks hmacSecretName: "" - # hmacTokenEnabled -- Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud hmacTokenEnabled: true - # hmacTokenVolumeMount -- Mount hmac token as a volume instead of using an environment variable Secret reference hmacTokenVolumeMount: enabled: false - # logFormat -- Log format either json or stackdriver logFormat: "json" - # logService -- The name of the service registered with logging logService: "" - # logStackSkip -- Comma separated stack frames to skip from the log logStackSkip: "" - # scope -- limit permissions to namespace privileges scope: "cluster" - cluster: crds: # cluster.crds.create -- Create custom resource definitions create: true - image: # image.parentRepository -- Docker registry to pull images from parentRepository: ghcr.io/jenkins-x - # image.tag -- Docker images tag # the following tag is latest on the main branch, it's a specific version on a git tag - tag: latest - + tag: 1.11.12 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent - # env -- Environment variables env: JX_DEFAULT_IMAGE: "" - - externalPlugins: - name: cd-indicators requiredResources: @@ -86,392 +63,287 @@ externalPlugins: - kind: Service namespace: jx name: lighthouse-webui-plugin - gcJobs: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # gcJobs.maxAge -- Max age from which `LighthouseJob`s will be deleted maxAge: 168h - # gcJobs.schedule -- Cron expression to periodically delete `LighthouseJob`s schedule: "0/30 * * * *" - # gcJobs.failedJobsHistoryLimit -- Drives the failed jobs history limit failedJobsHistoryLimit: 1 - # gcJobs.successfulJobsHistoryLimit -- Drives the successful jobs history limit successfulJobsHistoryLimit: 3 - # gcJobs.concurrencyPolicy -- Drives the job's concurrency policy concurrencyPolicy: Forbid - # gcJobs.backoffLimit -- Drives the job's backoff limit backoffLimit: 6 - image: # gcJobs.image.repository -- Template for computing the gc job docker image 
repository repository: "{{ .Values.image.parentRepository }}/lighthouse-gc-jobs" - # gcJobs.image.tag -- Template for computing the gc job docker image tag tag: "{{ .Values.image.tag }}" - # gcJobs.image.pullPolicy -- Template for computing the gc job docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - webhooks: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # webhooks.replicaCount -- Number of replicas replicaCount: 1 - # webhooks.terminationGracePeriodSeconds -- Termination grace period for webhooks pods terminationGracePeriodSeconds: 180 - image: # webhooks.image.repository -- Template for computing the webhooks controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-webhooks" - # webhooks.image.tag -- Template for computing the webhooks controller docker image tag tag: "{{ .Values.image.tag }}" - # webhooks.image.pullPolicy -- Template for computing the webhooks controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - - # webhooks.labels -- allow optional labels to be added to the webhook deployment labels: {} podLabels: {} - # webhooks.podAnnotations -- Annotations applied to the webhooks pods podAnnotations: {} - # webhooks.serviceName -- Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out serviceName: hook - # webhooks.service -- Service settings for the webhooks controller service: type: ClusterIP externalPort: 80 internalPort: 8080 annotations: {} - resources: # webhooks.resources.limits -- Resource limits applied to the webhooks pods limits: cpu: 100m # may require more memory to perform the initial 'git clone' cmd for big repositories memory: 512Mi - # webhooks.resources.requests -- Resource requests applied to the webhooks pods requests: cpu: 80m memory: 128Mi - # webhooks.probe -- Liveness and readiness probes settings probe: path: / - # webhooks.livenessProbe -- 
Liveness probe configuration livenessProbe: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods nodeSelector: {} - # webhooks.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods affinity: {} - # webhooks.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods tolerations: [] - ingress: # webhooks.ingress.enabled -- Enable webhooks ingress enabled: false - # webhooks.ingress.annotations -- Webhooks ingress annotations annotations: {} - # webhooks.ingress.ingressClassName -- Webhooks ingress ingressClassName ingressClassName: null - # webhooks.ingress.hosts -- Webhooks ingress host names hosts: [] - tls: # webhooks.ingress.tls.enabled -- Enable webhooks ingress tls enabled: false # webhooks.ingress.tls.secretName -- Specify webhooks ingress tls secretName secretName: "" - # webhooks.customDeploymentTriggerCommand -- deployments can configure the ability to allow custom lighthouse triggers # using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify # `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing customDeploymentTriggerCommand: "" - foghorn: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # foghorn.replicaCount -- Number of replicas replicaCount: 1 - # foghorn.terminationGracePeriodSeconds -- Termination grace period for foghorn pods terminationGracePeriodSeconds: 180 - image: # foghorn.image.repository -- 
Template for computing the foghorn controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-foghorn" - # foghorn.image.tag -- Template for computing the foghorn controller docker image tag tag: "{{ .Values.image.tag }}" - # foghorn.image.pullPolicy -- Template for computing the foghorn controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - resources: # foghorn.resources.limits -- Resource limits applied to the foghorn pods limits: cpu: 100m memory: 256Mi - # foghorn.resources.requests -- Resource requests applied to the foghorn pods requests: cpu: 80m memory: 128Mi - # foghorn.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods nodeSelector: {} - # foghorn.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods affinity: {} - # foghorn.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods tolerations: [] - - tektoncontroller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # tektoncontroller.dashboardURL -- the dashboard URL (e.g. 
Tekton dashboard) dashboardURL: '' # tektoncontroller.dashboardTemplate -- Go template expression for URLs in the dashboard if not using Tekton dashboard dashboardTemplate: '' - # tektoncontroller.replicaCount -- Number of replicas replicaCount: 1 - # tektoncontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - image: # tektoncontroller.image.repository -- Template for computing the tekton controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-tekton-controller" - # tektoncontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # tektoncontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # tektoncontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # tektoncontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # tektoncontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # tektoncontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # tektoncontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # tektoncontroller.resources.requests -- Resource requests applied to the tekton controller pods requests: cpu: 80m memory: 128Mi - # tektoncontroller.service -- Service settings for the tekton controller service: annotations: {} - jenkinscontroller: # logLevel -- The logging level: trace, debug, 
info, warn, error, fatal logLevel: "info" - # jenkinscontroller.jenkinsURL -- The URL of the Jenkins instance jenkinsURL: - # jenkinscontroller.jenkinsUser -- The username for the Jenkins user jenkinsUser: - # jenkinscontroller.jenkinsToken -- The token for authenticating the Jenkins user jenkinsToken: - # jenkinscontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - image: # jenkinscontroller.image.repository -- Template for computing the Jenkins controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller" - # jenkinscontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # jenkinscontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # jenkinscontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # jenkinscontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # jenkinscontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # jenkinscontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # jenkinscontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # jenkinscontroller.resources.requests -- Resource requests applied to the tekton controller pods requests: cpu: 80m memory: 128Mi - # jenkinscontroller.service -- Service settings for the tekton controller service: annotations: {} - 
keeper: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # keeper.statusContextLabel -- Label used to report status to git provider statusContextLabel: "Lighthouse Merge Status" - # keeper.replicaCount -- Number of replicas replicaCount: 1 - # keeper.terminationGracePeriodSeconds -- Termination grace period for keeper pods terminationGracePeriodSeconds: 30 - image: # keeper.image.repository -- Template for computing the keeper controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-keeper" - # keeper.image.tag -- Template for computing the keeper controller docker image tag tag: "{{ .Values.image.tag }}" - # keeper.image.pullPolicy -- Template for computing the keeper controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # keeper.podAnnotations -- Annotations applied to the keeper pods podAnnotations: {} - # keeper.env -- Lets you define keeper specific environment variables env: {} - # keeper.service -- Service settings for the keeper controller service: type: ClusterIP externalPort: 80 internalPort: 8888 - resources: # keeper.resources.limits -- Resource limits applied to the keeper pods limits: cpu: 400m memory: 512Mi - # keeper.resources.requests -- Resource requests applied to the keeper pods requests: cpu: 100m memory: 128Mi - # keeper.probe -- Liveness and readiness probes settings probe: path: / - # keeper.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # keeper.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # keeper.datadog.enabled -- Enables datadog enabled: "true" - # keeper.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods nodeSelector: {} - # keeper.affinity -- [Affinity 
rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods affinity: {} - # keeper.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods tolerations: [] - poller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # poller.enabled -- Whether to enable or disable the poller component enabled: false - # poller.replicaCount -- Number of replicas replicaCount: 1 - # poller.terminationGracePeriodSeconds -- Termination grace period for poller pods terminationGracePeriodSeconds: 30 - image: # poller.image.repository -- Template for computing the poller controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-poller" - # poller.image.tag -- Template for computing the poller controller docker image tag tag: "{{ .Values.image.tag }}" - # poller.image.pullPolicy -- Template for computing the poller controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # poller.podAnnotations -- Annotations applied to the poller pods podAnnotations: {} - # poller.env -- Lets you define poller specific environment variables env: # poller.env.POLL_PERIOD the default time period between polling releases and pull requests POLL_PERIOD: 20s - # poller.env.POLL_RELEASE_PERIOD the time period between polling releases # POLL_RELEASE_PERIOD: 20s @@ -480,77 +352,58 @@ poller: # poller.env.POLL_HOOK_ENDPOINT the hook service endpoint to post webhooks to POLL_HOOK_ENDPOINT: http://hook/hook/poll - # poller.contextMatchPattern -- Regex pattern to use to match commit status context contextMatchPattern: "" - # poller.requireReleaseSuccess -- Keep polling releases until the most recent commit status is successful requireReleaseSuccess: false - resources: # poller.resources.limits -- Resource limits applied to the poller pods limits: cpu: 400m memory: 
512Mi - # poller.resources.requests -- Resource requests applied to the poller pods requests: cpu: 100m memory: 128Mi - # poller.probe -- Liveness and readiness probes settings probe: path: / - # poller.internalPort -- The internal port used to view metrics etc internalPort: 8888 - # poller.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # poller.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # poller.datadog.enabled -- Enables datadog enabled: "true" - # poller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods nodeSelector: {} - # poller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods affinity: {} - # poller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods tolerations: [] - engines: # engines.jx -- Enables the jx engine jx: true - # engines.tekton -- Enables the tekton engine tekton: false - # engines.jenkins -- Enables the Jenkins engine jenkins: false - configMaps: # configMaps.create -- Enables creation of `config.yaml` and `plugins.yaml` config maps create: false - # configMaps.config -- Raw `config.yaml` content config: null - # configMaps.plugins -- Raw `plugins.yaml` content plugins: null - # configMaps.configUpdater -- Settings used to configure the `config-updater` plugin configUpdater: orgAndRepo: ""