From f5f20b12a300103e447b6a629e73bbb8985cceb1 Mon Sep 17 00:00:00 2001 From: jenkins-x-bot Date: Wed, 30 Oct 2024 11:59:07 +0000 Subject: [PATCH] chore: release 1.21.0 --- changelog.md | 6 + charts/lighthouse/Chart.yaml | 4 +- charts/lighthouse/README.md | 310 ++++++++++++------------ charts/lighthouse/lighthouse-1.21.0.tgz | Bin 0 -> 16065 bytes charts/lighthouse/values.yaml | 151 +----------- 5 files changed, 167 insertions(+), 304 deletions(-) create mode 100644 changelog.md create mode 100644 charts/lighthouse/lighthouse-1.21.0.tgz diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..73a96995e --- /dev/null +++ b/changelog.md @@ -0,0 +1,6 @@ + +## Changes in version 1.21.0 + +### New Features + +* support loadBalancerSourceRanges for hook (Mårten Svantesson) diff --git a/charts/lighthouse/Chart.yaml b/charts/lighthouse/Chart.yaml index edc18364e..b72b239b9 100644 --- a/charts/lighthouse/Chart.yaml +++ b/charts/lighthouse/Chart.yaml @@ -3,6 +3,6 @@ description: | This chart bootstraps installation of [Lighthouse](https://github.com/jenkins-x/lighthouse). 
icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-website/master/images/logo/jenkinsx-icon-color.svg name: lighthouse -version: 0.1.0-SNAPSHOT +version: 1.21.0 home: https://github.com/jenkins-x/lighthouse - +appVersion: 1.21.0 diff --git a/charts/lighthouse/README.md b/charts/lighthouse/README.md index b05af9ed2..e2b7229f6 100644 --- a/charts/lighthouse/README.md +++ b/charts/lighthouse/README.md @@ -42,158 +42,162 @@ helm uninstall my-lighthouse --namespace lighthouse ## Values -| Key | Type | Description | Default | -| --------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `cluster.crds.create` | bool | Create custom resource definitions | `true` | -| `configMaps.config` | string | Raw `config.yaml` content | `nil` | -| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | -| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | -| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | -| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | -| `engines.jx` | bool | Enables the jx engine | `true` | -| `engines.tekton` | bool | Enables the tekton engine | `false` | -| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | -| `externalPlugins[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].namespace` | string 
| | `"jx"` | -| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | -| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | -| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | -| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | -| `foghorn.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | -| `foghorn.replicaCount` | int | Number of replicas | `1` | -| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | -| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | -| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | -| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | -| `gcJobs.backoffLimit` | int | Set the backoff limit for failed cronJobs | `6` | -| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | 
`"Forbid"` | -| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | -| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | -| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | -| `gcJobs.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | -| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | -| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | -| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | -| `git.server` | string | Git server URL | `""` | -| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | -| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | -| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | -| `hmacToken` | string | Secret used for webhooks | `""` | -| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | -| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | -| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | -| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | -| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | -| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | -| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | -| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | -| `jenkinscontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| 
`jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | -| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | -| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | -| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | -| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | -| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | -| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | -| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `keeper.readinessProbe` | object | 
Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.replicaCount` | int | Number of replicas | `1` | -| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | -| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | -| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | -| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | -| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | -| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | -| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | -| `logFormat` | string | Log format either json or stackdriver | `"json"` | -| `logService` | string | The name of the service registered with logging | `""` | -| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | -| `oauthSecretName` | string | Existing Git token secret | `""` | -| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | -| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | -| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | -| `poller.datadog.enabled` | string | Enables datadog | `"true"` | -| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | -| `poller.env` | object | Lets you define poller specific environment variables | 
`{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | -| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | -| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | -| `poller.internalPort` | int | | `8888` | -| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | -| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | -| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.replicaCount` | int | Number of replicas | `1` | -| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | -| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | -| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | -| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | -| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | 
`[]` | -| `scope` | string | set scope to either `cluster` or `namespace` for permissions | `cluster` | -| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | -| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | -| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | -| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `tektoncontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | -| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace 
period for tekton controller pods | `180` | -| `tektoncontroller.enableRerunStatusUpdate` | bool | Enable updating the status at the git provider when PipelineRuns are rerun | `false` | -| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | -| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | -| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | -| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | -| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | -| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | -| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | -| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | -| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | -| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | -| `webhooks.livenessProbe` | object | Liveness probe configuration | 
`{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | -| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | -| `webhooks.podLabels` | object | | `{}` | -| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.replicaCount` | int | Number of replicas | `1` | -| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | -| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | -| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | -| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | -| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | -| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `cluster.crds.create` | bool | Create custom resource definitions | `true` | +| `configMaps.config` | string | Raw `config.yaml` content | `nil` | +| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` 
| +| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | +| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | +| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | +| `engines.jx` | bool | Enables the jx engine | `true` | +| `engines.tekton` | bool | Enables the tekton engine | `false` | +| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | +| `externalPlugins[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | +| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | +| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | +| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | +| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | +| `foghorn.logLevel` | string | | `"info"` | +| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | +| `foghorn.replicaCount` | int | Number of replicas | `1` | +| 
`foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | +| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | +| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | +| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | +| `gcJobs.backoffLimit` | int | Drives the job's backoff limit | `6` | +| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | +| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | +| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | +| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | +| `gcJobs.logLevel` | string | | `"info"` | +| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | +| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | +| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | +| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | +| `git.server` | string | Git server URL | `""` | +| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | +| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | +| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | +| `hmacToken` | string | Secret used for webhooks | `""` | +| `hmacTokenEnabled` | bool | 
Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | +| `hmacTokenVolumeMount` | object | Mount hmac token as a volume instead of using an environment variable Secret reference | `{"enabled":false}` | +| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | +| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | +| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | +| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | +| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | +| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | +| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | +| `jenkinscontroller.logLevel` | string | | `"info"` | +| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| 
`jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | +| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | +| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | +| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | +| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | +| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.logLevel` | string | | `"info"` | +| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | +| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | +| `keeper.probe` | object | Liveness 
and readiness probes settings | `{"path":"/"}` | +| `keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.replicaCount` | int | Number of replicas | `1` | +| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | +| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | +| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | +| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | +| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | +| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | +| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | +| `logFormat` | string | Log format either json or stackdriver | `"json"` | +| `logService` | string | The name of the service registered with logging | `""` | +| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | +| `oauthSecretName` | string | Existing Git token secret | `""` | +| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | +| `oauthTokenVolumeMount` | object | Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) | `{"enabled":false}` | +| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | +| `poller.contextMatchPattern` | string | Regex pattern to use to 
match commit status context | `""` | +| `poller.datadog.enabled` | string | Enables datadog | `"true"` | +| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | +| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | +| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | +| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | +| `poller.internalPort` | int | | `8888` | +| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.logLevel` | string | | `"info"` | +| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | +| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | +| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.replicaCount` | int | Number of replicas | `1` | +| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | +| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | +| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | +| 
`poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | +| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | +| `scope` | string | limit permissions to namespace privileges | `"cluster"` | +| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | +| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | +| `tektoncontroller.enableRerunStatusUpdate` | bool | Enable updating the status at the git provider when PipelineRuns are rerun | `false` | +| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | +| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `tektoncontroller.logLevel` | string | | `"info"` | +| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | +| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| 
`tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | +| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | +| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | +| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | +| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | +| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | +| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | +| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | +| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | +| 
`webhooks.ingress.tls.enabled` | bool | Enable webhooks ingress tls | `false` | +| `webhooks.ingress.tls.secretName` | string | Specify webhooks ingress tls secretName | `""` | +| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | +| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.logLevel` | string | | `"info"` | +| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | +| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | +| `webhooks.podLabels` | object | | `{}` | +| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.replicaCount` | int | Number of replicas | `1` | +| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | +| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | +| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | +| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | +| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | +| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | You can look directly at the [values.yaml](./values.yaml) file to 
look at the options and their default values. diff --git a/charts/lighthouse/lighthouse-1.21.0.tgz b/charts/lighthouse/lighthouse-1.21.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..4e6888e29a5717676ac2d719e3eeaadb50c1299b GIT binary patch literal 16065 zcmX|oV{|7^*lcXuw(X5=+uqo=ZQHhOCmTB(+u9hvoB#XXd*{P^=rhyj^qJ|ddaCLn zjD|u3`tJfz1JN2uDl?f#%5lhgaaOp;pR|KSLcw^u(2|-Gxbzevi~J% zW@8I<)$8rN)$u^(@x59;pf+16WnPvrCf4PpV} zsD{aKf;I}k54pVYT-+hI{iY>min&f6wh_}&X96h2VX%;gu*;dufh1yvnLKhU`SKkInvLo)4f&YbfjFlEjA&1qY8#MbGn^2wiB7(F2!I$z&U{%e8y#d0HOyy0 zQ;FRSh!qJrFX^R-JnFKJh~$#IQ%DIWdpf*y?_@ys40ED5qJK?IpmR9pkc!EHIkQl0 z$_DwB7dn~ROCm|qt?4~cBoI!JDcSSrBBX#3qbvm4lnI0?%Sf7po5noBScp~e3`opG zROyI*BTpVD{5ik_f43g{p?iap7@UC0~Ny&V#yz9dS&TA3phmmG}T86G0{=p{B1JtmLE zS{T`D<~()ENKTUAzhDpH30r2tJC4D6sbOp2fz zHyHqu+__SIBmlgBxxV{APs7n&s_p%lJSLgCAR;lu3+mM}hLrAvEcsXs_?K!)Ksi^} z=SD2C7_xu_vrz`B7h^ii!8TFVR`L$)`ym;e8Z?DY5x8LI5fT_`yc_f6s0=7#cp$L0 z$N^J=mS}UF6jqWBGn|FfM|?4Ix&2F9aI|x-Lo)9 z2ZkwJh$01tU4G~;CS*O?)t7ds@?!L%v_dP*1ZpR47u8Q3Sp?bs7wV%3GlX|m14fj2 z(Qp5+{OUDI0YYEC-X5=yo3lTWctmG$(XMoSu*pCYHtQEatyP|WkP>}+MIliL#b`NR-<-bQ7`#;=GKwZOyoetuJ5XhWR|6YhT=stUm@Ir-0$90a z*pSQjMJKfIy>g7)St0&ju@cG^5MJygLPsduCUAtZu=@a(6IPobgayaDg0~RdChDA-(c}pMz@P<$jq3>jQ;aIqoM(|~P#ke}`1;p#D%wA*i4kPd$H-3 zIIy#1kyarbDcX_&!;uLciQYrOJh zB_d$nMEsRvc;`}PJK#Vt4+P1`k2!xd}&1lch+ zk>K>7rb&57O$V8Cjh-b(sG&tf3GD*PCpPVhF2_CSTVdf}QQpTy`qH9O$NU4Ekm3}n z#u3pLQHhj=phXG<97xi*&ORnE`{6GF^3Op>?$S5H_og|CzmWjTJ{)(WBLYz*L&x*U z9)BdT#);dQ%!=DI*CFbTUtM_J$*e|zGKr-d^EvEju3#5TARQV3LvWJ4lAEXrhsX%b z{g7xv;v{XPq8nr(suxgA@Y$FolIT!L4z=+MU~qv2lT!JcrB(TGjtkea2X$v3cnw)G zpZZW=fkuxJDNP6L$lV$8-P5(ZBr`&vua)7JUpoSu*?X z;XB7Wh>&4%zU$Q{fmbHeL3<=Z(h=0@QNFb$_e4;!Q1a=o!E*-*9wmPfAeTIeQfw6F z8oU6+i%e*CooP2rXR={Z_2C6kK@ZdLp%!hFgX{VzS%GV*n=iw|0U@%ModZn`Q^e8* zSYpMM)q%~xcpbx5ns>Q_KzgdfC!wQHgX2l7rRR_6D&v7gNPiXcI8T+bK=5Ii$s$ZT zLbvJ;N2a~B+%TpW0&5Uuiqc%*Ae3`b>)%Gk_FoDL~_{}x?=zOIz*_IAjCUgdZW zJ+g`p3LU*j2`DEXNxe8s*;kIr&wjpLt(CKn70M-iDpf+~RS}SrTd(8cl-br0r(?>{QLy#r 
zpW$Vv@XDl8>*-pdUt~S8Um<#5P{f#wFFZYMbuj%qhVqj7Y#5AhZRquHy2{iQ2ZlOz z^1Qz^I$DI*y{(;xhx9!wH7m-6nkZE!y_%Y;wIls8^E}}Np(ShFqV#m*+HCCHsSQw6 z`BEwD;+sJh#VARk&DFb>z*0nL0<9}+^%un~UGYc1#q<0%L6x#&^=Z2K&4RKx)L}Gz zm;#zwkZB6dIbuL+SlUsUTf&Pg6x)m5DcVg|eZyNKE&!X_wqJp((D zuI_X8P)+|0BRP!jKc{`i4i8)RfOAfP;Tc+WP15eiUSH^3xY85Qb&sMiGB_pEQD#-> z+RbH3gJBJ?nLUzEGCS}~Dg__}LDIb7?wQ6^xs;Vk%wfc(UR^sqbFosbQMW>xJqgQ* zJ=*3{hx+l%W28;4)q$yvH}#rJ8)Q;Cl^)^(Ry$23HSA60Trzm`ewEl%=8v-~$$vCY zDfCj%Zt>o%J(lu%|Ylo6j0aL0yFjJnv8juW`1{Gl*5v*LdEnGqivZ`Msgvf+SgaG zh~;55h<#>Y%ZGK=TOo_}!D%Xc_p}sI)~($vOI~B#gqA$Be2wa-ZfiP>X#0d(x2<)Z?7y0xp5$XW_+?gq6NVFMsY07rZ#a5cBXDz6n zjEtZKy}~j`H5iqVtR8xU8|#F=ZARuKfu}fmOt)I6sE7Uast2TLvVfQBm))5{nXQi- z*`0>uWNE7R;P2YNw8qU;U@x&CGa!uwXh}BVQaH-E#!fl=5#?rVHZRUt|wnv?glK2B#Rm| z1-C7@*>2F2?IGw_BV@M0EwO z!t-H{bWY}Oo7Xd*!17ACF05h`I7%f^d*Qhq~lq?eR4MsEjYHL_VKLOWLJBlf!^cW^1zc_?o#K7BtRP`5zcRQ4S+%A6iz zBp)O-XrizRDJlQ$&n{#XLv{J!fnR87{R-H*(dYiY9Gvb=*ZJ;`{C6yKE5S6pR0x;H zJVt!*!uKA4e05Tf4op!WMwlvC$X$oWfO6$eTBp8*|93eZ?pVkTX{k4D-@#}unfb7IMZhCd=R%E9Q&j2w zX1E$FswOeQ2A(aFuG(GLqi}qL+tcPgU*Ez<=-FV61|IvC3pvbc9Az6=zpXi}M3WsE z*J8MHL(yBic3YpM#mkkh20!>HIXKpGtc;(46~$|mY=TEHMhmU1R2cVTbes1e&Q}tb zE+*s-yUmVpjyWW4LnBn>+OK<3R|cT1gIz0=s;X!Y8?ffA4#S=sRy?m#?{5=x9`aX} zJ(r$zAsRY9-MQ9xZvdQ0BQcj>>+eXRFWTj_rO37JHo=FgxiFGtSnI&DP>cz|Iwc|= z$=F5hXwF~_aJo4oz@hvo!m=EV_nI_J&DYtbnSSzA9t6mFGNkq!65wZ{%^9C>!dElW zYZ(gk7hh8gbA_a-EeAwM?6IZP*@B@`=zWZjLWxso7h z&J$@1g$NX+2$G}V@Z+!~{3K`^n^BPp7`Y{<^A66$c`hvilO}9KsJb7RdxhQgGhV59 zw^pJP6ScOkvFs0!#5t32whfgSF4T3&7wpP&F;M|i5hpe+`eI7e+c`|SR){K&1yRhi zNr`;23R^5?6F#l7p;Vn;vf|YmQ}E?VqbMAmFhmmVcl_6qo`d75{2-j93XWIBS7ZiS z>bjNdxf?`4EMBhT1hH>OGS?tXVt$bEyp1p_I}1pUirPtOxOnbKXA~suiHXB8VeDdT z2}kk~_Hh3MP(_(p3Xe~DLRIXJZ>S!3Ex38XcZv|Sd4k3jS(1&~m(p2u{m^oFoIXlW zk#`dRsrjABNuFu=_mclB3npDi^89RNvj|C;<2foDj?9@GlVHDx>4b*&I%xITkX5N5 zT;kr5(rGVoYVqfPo_c+ed_3x3{#{@~z|9a?eDmG@)!SS2+0J`rf9t;`tF7t45@S4l zcqk>MVg_AKDoQT@jhGY0Jx!x8r<9aV&~43;l=Em{9nIq@JC@23^mqy==l 
z5Ne1Jcn?*jtyDz##Om3g7@|yK6N0ar!%SMTCB-}&i06|s)S4OrX;W2Y*^R*~sZnM* zH>n~83R_I*M~&fedCAy-f&s;4$3r-lwX5r4P&RASb zIoLZ0&e6DWh~QL;Hu24rN^@?>&i8KWWRD6|Z7Lsswx+9hcl!LdIru?{ZqMm>%uUld zvlL|l802UD1-R|Oq<2@&m{b}VMc6hABkt1U0ew6?yxm<~-24KaJzigJAnppI@aFGr z8d&4Mu!X}~Yvrnn)d?ACE=*OWcH%AP$WDLxK0S4Heco+NepELXPaEV!G{0jdC5f&i zu3Sv+%2iR^b18nxPP%A%AnD}!_4akt zg5cMMWolO>7t#0rb@X{~9sZI!u&sl~IWb*)xLe2Ix5WR&E|<9dXxVTNZQ5V>!G>#D zY`H2w4i%~tFd%S<>(USUjXh@w6()eanVabi=KEGOl`?KJ8wQYtlE5)8ZKI?&=VI9q zjzO2KWFAN-203e3BZAD=<;i#^a zurt?_`5kP`-~}tY%{SGE_3?1^ck&9OQc#N@k(AeGzSeI4q>T^aZ z@;CXx(ZQ6UxPmqGnta7i18(1L&iWG86Ptz*^@Jp(^KGP@jAa2)K#|G1X z*q*vOta=@jTGb6Wpk7yb8?spf0I7)80pN?z{`cVY<2OK)!1|AnO=c4I>u5wln9RbI z=lV(Q()MyFPu_2XCprECO~D)Ay(NUwZw?9noCNsb0f5B%bco?(Z(KYL>1|b7ur3=J z#J6R^LQ|+*X7y0sb>9=A$^DEGq1+N-aF0`YQOQMoNpaH{e#l|V_2(D>XD9lNdqrS( z+%F+5k6dYr*2LBCf2D1!vUHRK*d#1J^xYZx+}*moO+5wNtbUuYPUc*Y5`!E;5RIpZ z_9rQA|6_dh;EoX}Nff!zby;I(vkRm=u3r8-K$@7Y7)5fs!xhKa8KRYmHpDza{2&%* zz;qb0lYc;c!T@xW12bj8(nW{mkdcN&dbXW#-+|Kbw!Ms#br[zy@60pQIva(nf zrlT{;9dkfe+ZLp1CIfPDE%g^kdYn1#5x-e1a7mhM1;qQEM&~l68Pp4vMXYzuow}ID z%yBE$G72{{7VSY>ofIOCyNwhaORbEwt5ybr?DsEBHd1P|oS|+6IMqfHF%=JYv<~UC z8AZWsV(JPeVkHlnSt(%48S$Bv+c2k(Pd4nRXZHx^1;z`X35n*s`$p7@S9|B^iZ61Qwl#0IQkmsRQZ#HYYt{FGC`w>qFdb9p34WAw;zb5R{~SIP5)Hv#j?D?jM6V}eBp&AC#L z;YdnPMwG*K_#hnXJCo?x4nYuBat>F@&0PO)8!oyJc=50=nrGSU<^7d@?&B^3czY79 z$DdCDtbfzryWQQ4?T`MXmHZLYJbWXkqaHg6^{=`pHud*|PV)Qg=z1%^dLa0y^0!J8 zL>bc&?yPRx0Wc&>Y=m?dSUUl$VGIZGM491!o@i71{hGLZoQTIZiqD>V(dJ@DjeF5z zJ(JZ$>@T$TP6V1X2@ek;VMUTS?658I18`yMb9O0j9)2)P(=4C$o(lp`xJo5B#3C;I z;)x<*;Cg7a&!=vOsSlzSF(Us|>>+-4WjsZFi!c}FbmrWMprFkqHw$I$W(BhHExcgz=hf^KoY)9IgNu7gk+4F2i(p^fd2lKit zf6-%<}4(w#=$(j=*qaRpz%Fd8s;x<@7vPDuJ=dq>`ljKpvr_&!QA#T8$YE{MDd7oWfRY8tg_0NBoM{{)&Wm(xS z;4e5qg#5n0X-n0-EkzA%Z(G=2it3;VzBhjR=cQe7KN&^wxEV{Np#vg+HXot{>M zPL%DCqVcS(q2Lvoa5weU64lVngih_z%nB3S_q(}aN(ry5;-q;Cp-=Dmc!|4BlovoH z-dCxGJvW?2T3QV*5G$Zde(n=9LFd-LNZXIH9|vwc^Mv~h{TJ6YLHYTe><|FNIYdhpBXY(K04*yTWQ 
z9@h9U&VK4R(l8iix_?at{|#61#*I_Uo$A)W*@~ED-Eje$Bxl~zZ{Bfp#jg76R$Z-K z%h5(>Yp$_5Qssi2z1Ty&+xK?-kd8>mC|~Et%gxQpjoH8`ps$PX@8R$HAds$6E&<@@ zuKj(e>XlI5$wQLx_m}Fd(F%j$zN%JhPDUJb3)AAQH zqQjQ7t@GyCFrwF-^e{w@f?6}KItjiNKfJ4dEf4G`~7K= z0Cujwc>o)-UpcPzwcOg>QuQLhJt_G0@TZO4W+uOHJ`EIJ>$N`&BtFA~2RXh!#1}y| zUwA0`SkBRfRR){XCY7$2or;gX)md)A2UzaeWp!3G%$BPBtc$gu5^y@z@!XwVhL6h{ zqQp}RFN?L%YnkJeB1ltwr>2G7hHHGDakE5crOi$zC3`mCOsD- z9`|}WfaQ#(PC6o%>VV~%dK~1gW`z&A>Hg6xFq;pcVH~5(Z}(Zi=Wzsy2P!I_DV6r1+BeA+u&^u#PfYS{`;QdVKzbD5EXCQ&+BjN zS!VuSZ9R~d`^rOK1K7SseAjvNCq1v60n9y&6|~BKS|#{jVjIZLzA?^`tYHFm*nJb? zX@3BQdxNh(cSRRjw_@&eIKs3?Asal%zQ4SIrruROflksu6;`(9E1Rixk@UXngvQ?<(WoL4(--0^8ivWT?J1gp&kw7~z5&#vg#%L!B6; z2jP$O+Y6XM>MhM64d|T6{I7DqX!E<81Ybe6_$xzfku>Hk?AmbeBHd?3h+cqMXPh;0 zL3D(DkHAn0D}daldUsS=SEVjc0m;oBzGpo>Ps|m?2O@1w^`rv_%HJDA zV4;zqSGp@J!+U7I6xt>==x$cXvvY$ff4{r`0-?#_2Z+#t!;B=IG;O)Ok;E;~gs;pNpk} z`)xJ6<_>ir$uts2*zbB>)LPjNlgYwMJ~>i%gxa+Lm6L9_)uyo{$Er8Y%cQDnbXl%8 zI8~YITldAM9v`RX2@^>VDf*GOFSb&JZZ}jy|LAi7ywMTMyOc2XM>pm@pAQZ?d;<4> z4Go*aH8zz(d4b8iT(Kfx6Pm$NDZMyYs4zqmdbuv&E)IxK%{+N2hs!?0SO_GCp^%;T zN0%ScD@$OZjc<~|yB;ZehWwB|R{RId4>!H@BMccFqkV5Q6brrg6BKOBQiiYn=5}tu za#38PLIw6LYW8-pySJ#mNOk2r!$KTQSO{fQ2TA@G_21c^b0H!L?{W%{`E&qJi=&Qd zLN+OG|0!>lFV(BLmVTbBdb6WomT@nN!RW6he=$oYG=t-C9W*b~Eav&jE{$gz=Up&> z5;o0M31`=L_i_Kqtq;?Z&zpTiyUr>ALBH?s7+_15MhdU$RL=U;l`CRA-sLnn*1ay+ zBS(Dcn*eV_&vx^#kZyXN6HTiBcA2aZ^vSO19RZ(p8zUN+gRCC)bj=6EOs|&nq zac=jy@y__)55LE4Hx>BJKrf$?6*{-NK96$m(p*p6Ky)_Kx1g{0KabKcf6zxSIll*Y zv&y#nDS7vO5-|F8BqL{|C&5iAvn2m|yuEOL(3@bOL2WOYy9>!323XKk>Wv zBB%#Ofk|12@6^1~wj?N(6un~@qC(t&%to+`;k7#iKH?g-z0C-e1Bs(xl6`c#4nhqR zg|jBAd4>Z>n6)YkTu{?~j`Q;6&85BrZw@vYiMK9V$2uUq!^QURCJ5iibFU*Un8*5< z*Scty`*(BYDA??_Zpcif0laQ`l$hc?LKw@5VVAQc4c(^X^c zR)G7oKg!KSAJCjct@-D3*Poke7tqYhwZ7?ZOSS?bsvZ879@JK;6QKLd`AC&vjcShW zq8Uza*Tdc~R5jCq*5YMS{5CJN5kL!Ws_&ty<~=P+!_vxWsFYpiC5@|1yaLK{ooF0I z$`5hv-}t}0x;BhXvaNJ6TAaM3cQINFE{z6=om0=sxKov8?|=P3dEw#w6ZlWM)A7e8 zPlrYM-w;BzPW2r`&NKwS+~IDg|qWQIe6jq=$yKZgC6JjWWsY@ 
z$8(J*z~|5x~JDmwF_{yD=-j6FbaDeIu0~PW||Yk_RKA4 zS0Wf?HIGwby7J$y^2G+cg zH6$3hmFQn^`L1_DQ)C4|eyo1?1DxD(0rsT71K;{HBkScsq)&lLfo8xnORmtJV~ z0Ll(7MCPEX7&n}}?2_>)G$JybI~iy5(^?sZ!S0&N8sMx9Y9rX6HOFxCIciQ~56u`f zVkz=E)YW+lK~=}<^`^|~%?q{=i%#~X`)U1z@?T7q!~lbHs*2~&@e{z-DbyQPYZjm;EVT9?!@hp4-+HyrmV(U%(BTkyHJ|dQ{i+J>(;~WPQXf<7ysG}u zk;~%MUS%r-*;=N4r&x+;*@Oi!RAuVb)tb4w=*?W5;H~`vM7JGmo_!TQ`+p4&+IfBn z@U7nfe#(GX!kOrA&4^^nX3+Mv!6m3ph%B)nS=tmPQC5QVjn_+j>C>B~h&zGJxG>r2 zm5~r1viosvfc=7{@C3Y$3RuD0qkqH8!$2AUQs*N+w!}5Vy=2(z(Hx&)Cac(tm3g`t z3S<_F=~y=PMW!QoH)G#f=35XOAiM)FQ^o^UPy-p)!*q}A!ElW{Akcko2h~b$I+Gz3n;>-ySG{S-kKzf}^0H%3WjOk6yRG9tA_)0LdSb z=!`a&{vX!b)A4zCbMf$boy|#ksxeNu)5~iG#C-|DXp|%S_;jfZ?!`y3L|r3(6=&6S z(?Jm}%Av*6CMD;R93El6NHsKxW1O=r#+}J}bPB%VQJ_f~k8@q3)svh`nzua0r)7{{ zL-1DOd6i7DppeG0`yrzPdgAoY-4gsPCaOu2E9EH@f#`5ovQmFvfCP-2^KC$k!ipETCgecp!P#ICwHehap*(f{s}~3j_Y>f8 z&fft|aU@OPO|A0e)4jgmU$YOeZ-@)9V6BuE%6;Z*T0web1dc`OC;tZ_A#@I*_*gQT zS~*dMDZwI)$1zOijNyW99{CBgWV|{76@I7QI(q?WA{VcAf>WXTgzw^Yag%*2+NFS< zHCqzI7KRr14ES0q;PqHJZ)jq<=gp|$n%_GDnkr`snSI9hUYKL0I*clwGwBFQ1WiQ_VE>-_!o(F|!;GGRem% zozKri8{tDCoqtZT_2o_d2ir&}qaPkhlyJVEG5o9Li0sVik&YKG%77mRSwhZgNeX-( z=E$`#fi$)mY`K_>hSz~#GB)jDwB}##iYCl5+ueeXAmYMGr1uqck8#!}xshuLYcnFXH^=1(YQ%8IZ2b0M!2SH>*+tu5Yfj-vPCs?U^kd-WIh{bst5GOr&e` zwOhQV#NeINh%LfGb)Q=&8(9%}Rq~mgs^x)^Q8L?>JfRF$JI39y7&m&gk65lBda8Jm zWiBCL?|?Ak7(P+Pk|S4yVxZsfW-A(@8*X*mD_re7=4~oKkWMH zPlfvohx6LjuDnb)r+p6KWri@f)h{<#@6-c_l;*8m2NG|C=~$7b+E}lQ7gYkBfV7L9eSPVn=8pnSLxRW=3d)xiio37=dJAw1XsmR*>`TQxqu-0LjC{){_4~(X;p(KbG^&w<&xE`Q~ z&^$tvM~q1n@&e*BBpPQ*#HYuha1W1@0?2%c8z*LWfZ-Gl@ANNkk07^a_1ra11hd>I zOzARVU@$Kg<&?@%DcJ-^i`)>!qW{HQgjg6Gq0sMI`GWTsdf%bzMr}`M-0|2;zm>Gv z80Z-Al2rDe`{zC^t*vS})UI6X#lP27FPE(CRU5(*zrw^F!xr$-v-Vc6<4S8lwH9`r zupx(%aaAVzLz=9r34k+=Ra&mp@8Ru~X_eINYkOry$PH>Z;O{rUL_`Tmg42;8ehp|i z1agfl4G0b;FoxiyvKLy}SQ7E1@byACW`Jq7EIy@k6b>pKqIaetZ_mD zA5$n{bjenxu$R-VO2vFwhsGVR;2i?Vf@goh)R|d&;ifZ~aAtzf*fp~iCFe~c1c_CD zR`r3TcFRgb9M%=Rp7Iu#EL@H{D5HnE)>P6gHw)k9iL%N{v7tZZ2t}b|ZFu&7&#hc0 
z=g0Hs>Ff9Uy8k;Fx$3ZKuf@4i0U&lR(6>|sEIxlC#Q9ta$x~Ttq?i3~F1cI+;oulP z&Ih+-wTuqt3ixcAD-!!>}Ykl4dQ%aM`0 zkrDA;bILCGSa0}Pj(??gdw`hYL~1+=Gwid3kx&i(c(g>2>@<=Fjn#9J z-gu->4N~?O(g!@B;c;y`o*2P#gxr6+@t@WBY`FkiWZ(-ecH_+tV4Cvrqkm;1~SE`nH?a&qs@g~T=NG6WHb;E@=7euA?OI#&Hw z(*j3;prCPlVD1hsKS*O~ z^K}U{zJ7JDqDy=&l8^_?!vwiH1Ifjemo{Lv(67e53rj)5IR!KBV$p@&l}2!{;5Wue z$p&U1RDL03%*Ec#4$L-TH_1sBY-&908Hw?n9)|FW<@qT69B@fW_l(I#`UlHH?uA2W zm0*EikaVi-&XV*2%2>9XzRwfCOVIDjfgb&Ksz>+gO(L3us1rQ6>or#!#A7U}bZ^crS_b(jIVCj1 zvGd-X>7e}SBH2bH?pI0zbXsQ)zfCc`o7AIP=Rgl$KZRFpF*vavsInPz167)a5|>oOz>Ls6q6IX9pafe3Z;O-SgOmUEy;pp>t?CUbC7atVzk z$LzJGt)btkk&PM+qGsJZ(=b3BP3#jOYDe@87$sHJkej=u#R8B98lm7|;aO$2R3hsk zXPg`YeQ1*8lqe$^mvWWt$0*8*Wa`F0CXv-X5-c$`CXw`0+ljthLDM3OcV!yTkE#93 zQZwYQIZK+}O{8sHde+A%SAM8=#=XE=pjGPE6JAKH+va11mff75!ouI}cDaPMTpE0> ziUb?FUtN;n?Xh|!99OHG!;HICiM1w62HPNS&%1$d3+TWrE*3O_xFH9Wy|c6+*IMP! zFT~;e&@+F;7)GoowN4X#4yDKlx-PcWv=k!OX>3x> zw{%1(i7Z8-gazG@xdJN{g^h(3g^cM#J*(ppzQZQ$Z!|Tp*bKjyUPpwk5g@pI@Lmg( zwLUw{@BU5?E^G}C&UIrh(DLS@g0D4)?VSA_QNPW??)}T72iekvX>@do{GpoP5Jh9s&$|>qH_2e$!;C ziYChTlO;sVE~Od`Hu?!C^j+Mx$a(*JzWd3F*1K(f@{3;^@pdCm7KwTt;{Q*QwfGUU ztl1|y(*X)WhFj5W1XjhD)S}O5>Jec92-sE1EE2m$gn)(}px+;IumGNq5}4V7n$@-O zGTVH`ck_7-rWDSvk=2lcZ17nk*{$%tI6@WMW``wD-SQjg&=`;;*m8pA6u^{E1jEmY z0xT>>>HS|f(RTBJ?`Ev$tFg5;RePw{bfrY6x+8HG4t6*hlxn2zo257~LsieD3k3vo5s;1I!yTKi!kTBD`g#Zk5=C zf{}6oge-2 zeC_AFZ=c&Noz-$9OkVNM{mAfOSNjbwM|)#(5ahQ0G2}TE00VvHfVOO4oNf8718dQY4k16AVJ#fJgzpVV!Qo5p4AY&6Hv)**}5aOk-o)}fVYBURbS*VO1?~o85Sz9 zEET`>0!>zTK$R?AVXIuNMF;=d>rZ?^xSkmK_C_N=M4R7aVRG%WJl2to|5GcRhuN;t z)b*k*Mo6RZ*o6W<`@n$Z$SC(ltI_N9>_FHqbJaQX+5hXAyM&4~gD1Y;? 
zoaENawFvzM*-n?%YQFLHdAJYUG14}CTLHa0w#aQU(7RJlocy~pJKBk>-#i;;E$MVq z{c3qD^51B;G%vEo)dYw?7m92WFpEgafvk_r-1q2vGw3Ur%t}j(x#XJtL}MBSbx@3` z4WuF(8F7hnz}#nK50X%oV1g)qB}^2ZTT5JQ(n{#t_k=6rU()MnI+~S(op%AUL=;=9 zH4@px@!=0ur57nN$Rr`@Z;zd8?081g0ybd1v}N!sE}=@_j8G8}1WoJ;GJDKoVArN)ZfR6{mek_2kJK)&Q9~WSYIvE3iHOVpXv-tZ`5ClRv zUHiKWqj#3%7k3dDBU4C|V0T~_Tp3MEzuUpmpzo57oCPS>7%G?Lak?*Yze6PVVaN~= z`Yw6#kR0%D`3Y1GFoVVFez5GEAwJMZB^Q_H=a=5-#c($125r9BwYhS6P z9BOytD6-aEvaZMp+huGMwD%ep;BKJ8OcL@Ci)Y&;pgUbANj&B&QlMgQV&l;WrTU%DN)*6**qdBZ)Va9;N4&iq*bwXRHqRO$8PNeI^}PnBBVDH zMoR6}VG#{;ju+*nCQAu!DqQhb%lxndbZAl%!sj^xD>+lFeAlTii}oH6=D2@(2+;oY z|M>vUtd+n34fg{6P_aGgIfk_+Wld#s&kWyQ%U^E0fMWA+|FlFYb)-+1s#JiR%!~hz zqH+C`*j$(NGJLD|g0cnguibljLHhI~FP>sQZ~iuC6n`gatx}Ujx1l=vBQ<>w7-aza zdH0$Adv|wx2hfn%0{v$HB(_&MQ+KO6u~lZfS!U~00ucD=RtIP&jMo5t0t5XY9T7Sj literal 0 HcmV?d00001 diff --git a/charts/lighthouse/values.yaml b/charts/lighthouse/values.yaml index 1999c5692..adbc43eaf 100644 --- a/charts/lighthouse/values.yaml +++ b/charts/lighthouse/values.yaml @@ -1,80 +1,57 @@ git: # git.kind -- Git SCM provider (`github`, `gitlab`, `stash`) kind: github - # git.server -- Git server URL server: "" - # lighthouseJobNamespace -- Namespace where `LighthouseJob`s and `Pod`s are created # @default -- Deployment namespace lighthouseJobNamespace: "" - githubApp: # githubApp.enabled -- Enables GitHub app authentication enabled: false - # githubApp.username -- GitHub app user name - username: "jenkins-x[bot]" - + username: "jenkins-x[bot]" # user -- Git user name (used when GitHub app authentication is not enabled) user: "" - # oauthToken -- Git token (used when GitHub app authentication is not enabled) oauthToken: "" - # oauthSecretName -- Existing Git token secret oauthSecretName: "" - # oauthTokenVolumeMount -- Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) oauthTokenVolumeMount: enabled: false - # hmacToken -- Secret used for webhooks 
hmacToken: "" - # hmacSecretName -- Existing hmac secret to use for webhooks hmacSecretName: "" - # hmacTokenEnabled -- Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud hmacTokenEnabled: true - # hmacTokenVolumeMount -- Mount hmac token as a volume instead of using an environment variable Secret reference hmacTokenVolumeMount: enabled: false - # logFormat -- Log format either json or stackdriver logFormat: "json" - # logService -- The name of the service registered with logging logService: "" - # logStackSkip -- Comma separated stack frames to skip from the log logStackSkip: "" - # scope -- limit permissions to namespace privileges scope: "cluster" - cluster: crds: # cluster.crds.create -- Create custom resource definitions create: true - image: # image.parentRepository -- Docker registry to pull images from parentRepository: ghcr.io/jenkins-x - # image.tag -- Docker images tag # the following tag is latest on the main branch, it's a specific version on a git tag - tag: latest - + tag: 1.21.0 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent - # env -- Environment variables env: JX_DEFAULT_IMAGE: "" - - externalPlugins: - name: cd-indicators requiredResources: @@ -86,70 +63,49 @@ externalPlugins: - kind: Service namespace: jx name: lighthouse-webui-plugin - gcJobs: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # gcJobs.maxAge -- Max age from which `LighthouseJob`s will be deleted maxAge: 168h - # gcJobs.schedule -- Cron expression to periodically delete `LighthouseJob`s schedule: "0/30 * * * *" - # gcJobs.failedJobsHistoryLimit -- Drives the failed jobs history limit failedJobsHistoryLimit: 1 - # gcJobs.successfulJobsHistoryLimit -- Drives the successful jobs history limit successfulJobsHistoryLimit: 3 - # gcJobs.concurrencyPolicy -- Drives the job's concurrency policy concurrencyPolicy: Forbid - # 
gcJobs.backoffLimit -- Drives the job's backoff limit backoffLimit: 6 - image: # gcJobs.image.repository -- Template for computing the gc job docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-gc-jobs" - # gcJobs.image.tag -- Template for computing the gc job docker image tag tag: "{{ .Values.image.tag }}" - # gcJobs.image.pullPolicy -- Template for computing the gc job docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - webhooks: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # webhooks.replicaCount -- Number of replicas replicaCount: 1 - # webhooks.terminationGracePeriodSeconds -- Termination grace period for webhooks pods terminationGracePeriodSeconds: 180 - image: # webhooks.image.repository -- Template for computing the webhooks controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-webhooks" - # webhooks.image.tag -- Template for computing the webhooks controller docker image tag tag: "{{ .Values.image.tag }}" - # webhooks.image.pullPolicy -- Template for computing the webhooks controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - - # webhooks.labels -- allow optional labels to be added to the webhook deployment labels: {} podLabels: {} - # webhooks.podAnnotations -- Annotations applied to the webhooks pods podAnnotations: {} - # webhooks.serviceName -- Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out serviceName: hook - # webhooks.service -- Service settings for the webhooks controller service: type: ClusterIP @@ -165,318 +121,234 @@ webhooks: cpu: 100m # may require more memory to perform the initial 'git clone' cmd for big repositories memory: 512Mi - # webhooks.resources.requests -- Resource requests applied to the webhooks pods requests: cpu: 80m memory: 128Mi - # webhooks.probe -- Liveness and readiness probes settings probe: 
path: / - # webhooks.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods nodeSelector: {} - # webhooks.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods affinity: {} - # webhooks.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods tolerations: [] - ingress: # webhooks.ingress.enabled -- Enable webhooks ingress enabled: false - # webhooks.ingress.annotations -- Webhooks ingress annotations annotations: {} - # webhooks.ingress.ingressClassName -- Webhooks ingress ingressClassName ingressClassName: null - # webhooks.ingress.hosts -- Webhooks ingress host names hosts: [] - tls: # webhooks.ingress.tls.enabled -- Enable webhooks ingress tls enabled: false # webhooks.ingress.tls.secretName -- Specify webhooks ingress tls secretName secretName: "" - # webhooks.customDeploymentTriggerCommand -- deployments can configure the ability to allow custom lighthouse triggers # using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify # `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing customDeploymentTriggerCommand: "" - foghorn: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # foghorn.replicaCount -- Number of replicas replicaCount: 1 - # foghorn.terminationGracePeriodSeconds -- Termination grace period for foghorn pods terminationGracePeriodSeconds: 180 - image: 
# foghorn.image.repository -- Template for computing the foghorn controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-foghorn" - # foghorn.image.tag -- Template for computing the foghorn controller docker image tag tag: "{{ .Values.image.tag }}" - # foghorn.image.pullPolicy -- Template for computing the foghorn controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - resources: # foghorn.resources.limits -- Resource limits applied to the foghorn pods limits: cpu: 100m memory: 256Mi - # foghorn.resources.requests -- Resource requests applied to the foghorn pods requests: cpu: 80m memory: 128Mi - # foghorn.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods nodeSelector: {} - # foghorn.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods affinity: {} - # foghorn.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods tolerations: [] - - tektoncontroller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # tektoncontroller.dashboardURL -- the dashboard URL (e.g. 
Tekton dashboard) dashboardURL: '' # tektoncontroller.dashboardTemplate -- Go template expression for URLs in the dashboard if not using Tekton dashboard dashboardTemplate: '' - # tektoncontroller.replicaCount -- Number of replicas replicaCount: 1 - # tektoncontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - # tektoncontroller.enableRerunStatusUpdate -- Enable updating the status at the git provider when PipelineRuns are rerun enableRerunStatusUpdate: false - image: # tektoncontroller.image.repository -- Template for computing the tekton controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-tekton-controller" - # tektoncontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # tektoncontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # tektoncontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # tektoncontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # tektoncontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # tektoncontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # tektoncontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # tektoncontroller.resources.requests -- Resource requests applied to the tekton controller pods requests: cpu: 80m memory: 128Mi - # 
tektoncontroller.service -- Service settings for the tekton controller service: annotations: {} - jenkinscontroller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # jenkinscontroller.jenkinsURL -- The URL of the Jenkins instance jenkinsURL: - # jenkinscontroller.jenkinsUser -- The username for the Jenkins user jenkinsUser: - # jenkinscontroller.jenkinsToken -- The token for authenticating the Jenkins user jenkinsToken: - # jenkinscontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - image: # jenkinscontroller.image.repository -- Template for computing the Jenkins controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller" - # jenkinscontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # jenkinscontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # jenkinscontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # jenkinscontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # jenkinscontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # jenkinscontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # jenkinscontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # jenkinscontroller.resources.requests -- Resource requests applied to 
the tekton controller pods requests: cpu: 80m memory: 128Mi - # jenkinscontroller.service -- Service settings for the tekton controller service: annotations: {} - keeper: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # keeper.statusContextLabel -- Label used to report status to git provider statusContextLabel: "Lighthouse Merge Status" - # keeper.replicaCount -- Number of replicas replicaCount: 1 - # keeper.terminationGracePeriodSeconds -- Termination grace period for keeper pods terminationGracePeriodSeconds: 30 - image: # keeper.image.repository -- Template for computing the keeper controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-keeper" - # keeper.image.tag -- Template for computing the keeper controller docker image tag tag: "{{ .Values.image.tag }}" - # keeper.image.pullPolicy -- Template for computing the keeper controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # keeper.podAnnotations -- Annotations applied to the keeper pods podAnnotations: {} - # keeper.env -- Lets you define keeper specific environment variables env: {} - # keeper.service -- Service settings for the keeper controller service: type: ClusterIP externalPort: 80 internalPort: 8888 - resources: # keeper.resources.limits -- Resource limits applied to the keeper pods limits: cpu: 400m memory: 512Mi - # keeper.resources.requests -- Resource requests applied to the keeper pods requests: cpu: 100m memory: 128Mi - # keeper.probe -- Liveness and readiness probes settings probe: path: / - # keeper.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # keeper.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # keeper.datadog.enabled -- Enables datadog enabled: "true" - # keeper.nodeSelector -- [Node 
selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods nodeSelector: {} - # keeper.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods affinity: {} - # keeper.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods tolerations: [] - poller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # poller.enabled -- Whether to enable or disable the poller component enabled: false - # poller.replicaCount -- Number of replicas replicaCount: 1 - # poller.terminationGracePeriodSeconds -- Termination grace period for poller pods terminationGracePeriodSeconds: 30 - image: # poller.image.repository -- Template for computing the poller controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-poller" - # poller.image.tag -- Template for computing the poller controller docker image tag tag: "{{ .Values.image.tag }}" - # poller.image.pullPolicy -- Template for computing the poller controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # poller.podAnnotations -- Annotations applied to the poller pods podAnnotations: {} - # poller.env -- Lets you define poller specific environment variables env: # poller.env.POLL_PERIOD the default time period between polling releases and pull requests POLL_PERIOD: 20s - # poller.env.POLL_RELEASE_PERIOD the time period between polling releases # POLL_RELEASE_PERIOD: 20s @@ -485,77 +357,58 @@ poller: # poller.env.POLL_HOOK_ENDPOINT the hook service endpoint to post webhooks to POLL_HOOK_ENDPOINT: http://hook/hook/poll - # poller.contextMatchPattern -- Regex pattern to use to match commit status context contextMatchPattern: "" - # poller.requireReleaseSuccess -- Keep polling releases until the most 
recent commit status is successful requireReleaseSuccess: false - resources: # poller.resources.limits -- Resource limits applied to the poller pods limits: cpu: 400m memory: 512Mi - # poller.resources.requests -- Resource requests applied to the poller pods requests: cpu: 100m memory: 128Mi - # poller.probe -- Liveness and readiness probes settings probe: path: / - # keeper.internalPort -- The internal port used to view metrics etc internalPort: 8888 - # poller.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # poller.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # poller.datadog.enabled -- Enables datadog enabled: "true" - # poller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods nodeSelector: {} - # poller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods affinity: {} - # poller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods tolerations: [] - engines: # engines.jx -- Enables the jx engine jx: true - # engines.tekton -- Enables the tekton engine tekton: false - # engines.jenkins -- Enables the Jenkins engine jenkins: false - configMaps: # configMaps.create -- Enables creation of `config.yaml` and `plugins.yaml` config maps create: false - # configMaps.config -- Raw `config.yaml` content config: null - # configMaps.plugins -- Raw `plugins.yaml` content plugins: null - # configMaps.configUpdater -- Settings used to configure the `config-updater` plugin configUpdater: orgAndRepo: ""