From f4b06a0566ed1f61348f7a743fcbf701432116d0 Mon Sep 17 00:00:00 2001 From: jenkins-x-bot Date: Thu, 28 Mar 2024 12:43:37 +0000 Subject: [PATCH] chore: release 1.16.3 --- changelog.md | 6 + charts/lighthouse/Chart.yaml | 4 +- charts/lighthouse/README.md | 308 ++++++++++++------------ charts/lighthouse/lighthouse-1.16.3.tgz | Bin 0 -> 15825 bytes charts/lighthouse/values.yaml | 151 +----------- 5 files changed, 166 insertions(+), 303 deletions(-) create mode 100644 changelog.md create mode 100644 charts/lighthouse/lighthouse-1.16.3.tgz diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..3ec207330 --- /dev/null +++ b/changelog.md @@ -0,0 +1,6 @@ + +## Changes in version 1.16.3 + +### Chores + +* deps: upgrade jenkins-x/go-scm to version 1.14.28 (jenkins-x-bot) diff --git a/charts/lighthouse/Chart.yaml b/charts/lighthouse/Chart.yaml index edc18364e..6e4753b9d 100644 --- a/charts/lighthouse/Chart.yaml +++ b/charts/lighthouse/Chart.yaml @@ -3,6 +3,6 @@ description: | This chart bootstraps installation of [Lighthouse](https://github.com/jenkins-x/lighthouse). icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-website/master/images/logo/jenkinsx-icon-color.svg name: lighthouse -version: 0.1.0-SNAPSHOT +version: 1.16.3 home: https://github.com/jenkins-x/lighthouse - +appVersion: 1.16.3 diff --git a/charts/lighthouse/README.md b/charts/lighthouse/README.md index ed4700eef..48370b1c7 100644 --- a/charts/lighthouse/README.md +++ b/charts/lighthouse/README.md @@ -42,157 +42,161 @@ helm uninstall my-lighthouse --namespace lighthouse ## Values -| Key | Type | Description | Default | -| --------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `cluster.crds.create` | bool | Create custom resource definitions | `true` | -| `configMaps.config` | string | Raw `config.yaml` content | `nil` | -| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | -| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | -| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | -| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | -| `engines.jx` | bool | Enables the jx engine | `true` | -| `engines.tekton` | bool | Enables the tekton engine | `false` | -| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | -| `externalPlugins[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | -| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | -| `foghorn.affinity` | object | [Affinity 
rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | -| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | -| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | -| `foghorn.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | -| `foghorn.replicaCount` | int | Number of replicas | `1` | -| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | -| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | -| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | -| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | -| `gcJobs.backoffLimit` | int | Set the backoff limit for failed cronJobs | `6` | -| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | -| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | -| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | -| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | -| `gcJobs.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | -| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | -| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | -| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | -| `git.server` | string | Git server URL | `""` | -| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | -| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | -| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | -| `hmacToken` | string | Secret used for webhooks | `""` | -| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | -| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | -| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | -| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | -| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | -| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | -| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | -| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | -| `jenkinscontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | -| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | -| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | -| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | -| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | -| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.logLevel` | string | The logging level: trace, 
debug, info, warn, panic, fatal | `"info"` | -| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | -| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | -| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.replicaCount` | int | Number of replicas | `1` | -| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | -| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | -| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | -| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | -| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | -| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | -| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | -| `logFormat` | string | Log format either json or stackdriver | `"json"` | -| `logService` | string | The name of the service registered with logging | `""` | -| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | -| `oauthSecretName` | string | Existing Git token secret | `""` | -| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | -| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | -| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | -| `poller.datadog.enabled` | string | Enables datadog | `"true"` | -| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | -| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | -| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | -| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | -| `poller.internalPort` | int | | `8888` | -| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | -| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | -| `poller.probe` | object | Liveness and 
readiness probes settings | `{"path":"/"}` | -| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.replicaCount` | int | Number of replicas | `1` | -| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | -| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | -| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | -| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | -| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | -| `scope` | string | set scope to either `cluster` or `namespace` for permissions | `cluster` | -| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | -| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | -| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | -| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `tektoncontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | -| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | -| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | -| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for 
example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | -| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | -| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | -| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | -| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | -| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | -| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | -| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | -| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | -| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | -| `webhooks.podLabels` | object | | `{}` | -| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.replicaCount` | int | Number of replicas | `1` | -| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | -| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | -| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | -| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | -| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | -| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `cluster.crds.create` | bool | Create custom resource definitions | `true` | +| `configMaps.config` | string | Raw `config.yaml` content | `nil` | +| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | +| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | +| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | +| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | +| `engines.jx` | bool | Enables the jx engine | `true` | +| `engines.tekton` | bool | Enables the tekton engine | `false` | +| `env` | object | 
Environment variables | `{"JX_DEFAULT_IMAGE":""}` | +| `externalPlugins[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | +| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | +| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | +| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | +| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | +| `foghorn.logLevel` | string | | `"info"` | +| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | +| `foghorn.replicaCount` | int | Number of replicas | `1` | +| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | +| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | +| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | +| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | +| `gcJobs.backoffLimit` | int | Drives the job's backoff limit | `6` | +| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | +| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | +| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | +| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | +| `gcJobs.logLevel` | string | | `"info"` | +| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | +| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | +| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | +| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | +| `git.server` | string | Git server URL | `""` | +| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | +| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | +| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | +| `hmacToken` | string | Secret used for webhooks | 
`""` | +| `hmacTokenEnabled` | bool | Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | +| `hmacTokenVolumeMount` | object | Mount hmac token as a volume instead of using an environment variable Secret reference | `{"enabled":false}` | +| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | +| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | +| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | +| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | +| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | +| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | +| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | +| `jenkinscontroller.logLevel` | string | | `"info"` | +| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | +| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | +| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | +| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | +| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | +| `keeper.livenessProbe` | object | Liveness probe 
configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.logLevel` | string | | `"info"` | +| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | +| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | +| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.replicaCount` | int | Number of replicas | `1` | +| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | +| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | +| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | +| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | +| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | +| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | +| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | +| `logFormat` | string | Log format either json or stackdriver | `"json"` | +| `logService` | string | The name of the service registered with logging | `""` | +| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | +| `oauthSecretName` | string | Existing Git token secret | `""` | +| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | +| `oauthTokenVolumeMount` | object | Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) | `{"enabled":false}` | +| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | +| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | +| `poller.datadog.enabled` | string | Enables datadog | `"true"` | +| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | +| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | +| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | +| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | +| `poller.internalPort` | int | | `8888` | +| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.logLevel` | string | | `"info"` | +| `poller.nodeSelector` | object | [Node 
selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | +| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | +| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.replicaCount` | int | Number of replicas | `1` | +| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | +| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | +| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | +| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | +| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | +| `scope` | string | limit permissions to namespace privileges | `"cluster"` | +| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | +| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | +| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | +| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `tektoncontroller.logLevel` | string | | `"info"` | +| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | +| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | +| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods 
| `{}` | +| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | +| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | +| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | +| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | +| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | +| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | +| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | +| `webhooks.ingress.tls.enabled` | bool | Enable webhooks ingress tls | `false` | +| `webhooks.ingress.tls.secretName` | string | Specify webhooks ingress tls secretName | `""` | +| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | +| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.logLevel` | string | | `"info"` | +| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | +| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | +| `webhooks.podLabels` | object | | `{}` | +| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.replicaCount` | int | Number of replicas | `1` | +| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | +| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | +| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | +| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | +| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | +| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | You can look directly at the [values.yaml](./values.yaml) file to look at the options and their default values. 
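
The table mirrors the comments in the chart's `values.yaml`, so the same keys can be overridden from your own values file. As a minimal sketch (the secret names, host name and engine selection below are illustrative placeholders, not chart defaults), a custom values file might look like this:

```yaml
# my-values.yaml -- example overrides for the lighthouse chart
git:
  # git.kind / git.server -- which SCM provider Lighthouse talks to
  kind: github
  server: ""

# Reference pre-created secrets rather than inlining tokens
# (both secrets are assumed to already exist in the cluster).
oauthSecretName: "lighthouse-oauth-token"
hmacSecretName: "lighthouse-hmac-token"

# Pick the execution engine; tekton is shown here purely as an example.
engines:
  jx: false
  tekton: true

webhooks:
  ingress:
    # Expose the hook service so the git provider can deliver webhooks.
    enabled: true
    hosts:
      - hook.example.com # placeholder host
```

A file like this can be passed to `helm install` or `helm upgrade` with `-f my-values.yaml`; any key you do not set keeps the default shown in the table above.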
diff --git a/charts/lighthouse/lighthouse-1.16.3.tgz b/charts/lighthouse/lighthouse-1.16.3.tgz new file mode 100644 index 0000000000000000000000000000000000000000..126ca6feb4be1e616d177a635d4ba49c2178bbe5 GIT binary patch literal 15825 zcmX||V|X3k`}I%Q*lujwwrx9&?WD;`W7~~wqv46&*lyf3w$GFA|N33eyxX&9X203j ztovT;vq@s$umJyEU^)PUnXEdig{%^{qAwqpIfpi@r3RO+z6Kw+incbll8%F&nUkfj zrkb;mtd)Z!;4I+TeS>%YZhV5G)HU$h4{GV72JhmW-5i>N= zB=|)1r|&r^)jKA!dghpSRckbp;$(aR7qC!jt{#0MM>H+SQf|- z8q5m?Zkg3g*vdkrwD?dlL!)>AAelbA?QCsq{lz^72Lgc{;FYBBQ(u?>c@QMG)5s`7 zyS%$hN4sxV6p^%9=BR2{d-_T>|CmSvVN^fN37f9(CvgBpyMPKoI?nJDtU2hsz)nE{ zCx~}ez`57IEVjZxszDW(U|nb<$_Xpfb_+@LU&7qY#vA|%RKt7>mA4YOUsz)C-OC(T zKtax6*FKn39$JL4cjz#}Q24%dqRJ9kt$oty`Ea!aBCG+kAWKA|@qER_Gb2H0)@VTN zGhP^BS2AK7V0P_`DKu0#LIZRR#^TA5Tu|gn@&pb*E*-+A+~MDC;?<5+@px>f?SlBO z)Knr30Ygn0k}0hC0OpNxo22U&wxnebDG#lpsrz#S+9pya{5xR>*0oCKsf>4~E8z<) zGc`vGLY4ybhjg${uy8VOYHm%Sr79^DnxYkP2S3?KR2bbiNPHv+g!0T})%a+-65UxO zEPl~BI5n(J#J2LZ5iws;L8yIrkA>ysF{%;t!d)EX(a_BX?QDhiA3k+9QrB`kq`Q)R z^da=Ewg}(~!QN*)Ia*8Tf?(#K?NB9zeIt}8f`AYMpP4>1DlTXrXz~F(oVZf}+sqrA zsb%mGlI$7+6fu0hd*2={c`8&tmANJTBO$z6z#cxD1Rj7cV1Dy=DDClB$khNOWlI@=$7_zi?9z9Nm9 zr$57ZmA=Z-P1czol1T|+SQI1lM;2-mqlG&Y@-#K+Psfn^&&cu6eG=?hqJ65+m8Ln- zgseruB+(Eck= zYD6!DzQo>ahMr_|yewLx0UMH(_LL}q&zvw(H84+(i^+1D1r0&T7pcb5}*X8DQ} z*65E&HmU8b3l%;DLMDR~SH@Sasq}v=-hY3R_dzSQ%_C-sj3`sW>%k1r&-T7D;InhB zLhg|i(;*~3S07Wu4Joj37sj*&MT9qkU|e{~4R&$nT@Xp-;kO_u#>{3R>2e@!VhkOr zRAeP{;kyN{4yT^rE`v_A`K+BsSBWyh$_xmhgZ2DrMaA_YMbk)vK0f|6!e?|?91Q=+ zbEkwpVJ?!LcI4wi8hykT~LKB*!p*I*Qau7QU^;B33U*af60pJeyV$e z!R8F@u8Ej50o|KO24#_^be)3(@B%>1&MeV*84az{qS&!o;h(QoK_K_VN6k?ce${C7 z+O+1F5xomT%I7dWm^d=8JOR9kpP>0uzma)3kJ(sOhd|bHl%)QIfi(reh)&=kH$14AJxp?NOe2VpRF}6uD0r2wPg>8}-H66u0<9PeFJWr2LFQ!O7T>GP*vSJ7gt^pAh zv+-0z5TBg>0QFsp?SfH*eP12UFcPXTHFXCeTbXKsSB4p0s4ScHH{d%x3``akO-FE~ z59E@f4b&`fiy{6bHIZ-BBYKUBteg@KE#MbK9;y9CR2|8F>*6%mF5<~o)MtJAbOJEn z1SGHI4?+=DS(PAJ7%HTokG?3~y@F#hl{fARDB~(rv_X?^Bti8%VNVj>7Po7CxagOp zNbngFDtlZcW>_%4Lg|QFp9idV_2X6A+`kk_BPG%mVfoALQxEi_3AF)mUsFD zY-1??{9xpuuftHYm-o~!&-j^ruac3_nB*S?@N*lmlEd*Tkm)}$@PdxTMZKXgijMno zX<>PW0!R2+%?a8dIRXCf=Lx?$ogn$l!`4e#?EhsPOf6k`>i366emvOAh4K;6;!j+= z#Fo=aDpu~%1{0^N+z2ShL}fzl#ff8y2@?l7nTgwrmQ#%(WGD=vKUiejthhaY@mn49 z3Jiht$!CUtwL6mx3lr~Ia@!ovB6Uct9WuSA z3>%U-rWIwTQD=F2#=IV4)N*c;$o%OOX%qR&w5jADdWL6MK@|CPr@rcF;bj4x7tCLy zF}w!}8uB~}Rge5A5d3O}r^HlA)zrhT?8VAq)y*@hZ-HyjDgwN6125|xA)ebArL*0* z3VXWTF{LP{0iv*pZ z^s$bBbfhy3#3s#B>SmPxH zy;kU{E-hvbo{L!~J0ImJ)nd^X8hy%z(+)DYH*5n~i5qA`WFHjLeMAx{ zc=ZfbKJ=4FeUXXw zwVYX`coU9yJvDSlne%1V2dC>)K3x>zeFxiAl)Dy$2xk6XArmNg=s-oAU*SZ#BP~zi z%n%Kmfyxqk-vu%f*^Zr<`@7#L z-=FA4HVWYkzdK&+x=8x8aJbf(=!lTInFOse#$FR9>(nt)@x1l&f~?2eGqj^*0kLE-5*LmaBt~;ePqI7`Si_xNHUS9I?gC zCiAE=>WOCz2&tv-Y7E85ddu`-sV-i3UztMTLiftK17EZPG~12~TNcM|Q0++V{*5DA zZI>~!rHse}Vo+gK(>cYd78g@hg0!@)AU}%H%Og(j~tNdz!7QiFf)@9Df3aQ8?%_iZKn0(geX>A z43Wj-fs?XFXTT~#*-sojOAIC4aIMe*FH z>JASs&2Uj#GW_QiIHt+COwiDVBqE*j!ykiz;|)vFuFA>1!F1l!Tt9R*^iQh3qt>!? 
zy$PtTmiVt?p8xn*d#PvcDpNgX*`W&jqhX8$I>=M1p+<8pIWe|C3;*552i59kVaGW4 zx61mR%HZB;2%+B)3zGc%@=-yB$RRTSdd>*#C}CcBYA#8o_MHC0>%qZa!M{{w{CcA# zDsIeh5HPNM`WeYGWb87hpb^>5A=jcWw4_)*ndAAvZd#Je*_4{vT{tW2HscmKHrRIQ zHpE-03pA)r~x!ULMtFbc&7ctlb9 zyb`oXg#`G8`FTaylX%3c3iU*3Ew!||Q=h??hjY(T8mWg>w%s_U97!MnzGoNDyT8Bx zj=!qlV$^y_$wC`p`Jwk0daTF8?Pu|k(CEb+1y%f_331Xn&;UZKyZ`!SSNwwR=KUR^ zBhfAGV6Rw!EMXjXT`32X75#ZAHfz@dwMLBAyM?@AF`V_VXCCWA@qVEL!ybAB6;A4~ z5i^4D%fzsjW)`^)(yCzK6p~5Tlodw;xdE)ax_?VU5lz*~#iHyv)uy}LxzyXO!Z|#T ze&eE(Qw9b)neUg94@$LiMYc{D%pp22e9=uut+Q5jbzms6*m|{!n*0EL3d~w!`JJySNLlx1Fz?1JC3?s15Zsj6vfG4+d38qY`7UN&4 ziXf$!Dr=1#6}8jB0mz^OBsPgUV@lG+gI72TqmZ*TIEo}dsD*x2q$0DEM=#RTW2*zP zI^~`jOe6h)iretFY*iXZIMo98>hgPS9OD=(YQkp5T638{H*7Y-@3Rx zKF}EeM)^L3%}E1r#FHYowu9bOX4?M)-6}@21gJriDuM{$MaoJk`?3E|=gY2Il?H81 z#tx`HJF;o%+YdY}KS)OyqjCOjzX3*S60eEqlFHV{o(J#O4)-AZ!C2uMR+Y3NMIUo_l;dj?SxK#iI>FQzN`Vw*iOnfziS=H|_qSRR;kvHxg%IUIlJw z@^VIt(R-mBX(%M7Y-HHdE>xI^R8h(#$FWi0&<6ntLwWGhNi`nj_bep^lW&H*Xvn16 z%DB;5n$mo4i5IJy8gZ-$g4o5Hr%B01c;9x1uZ+3(f2tgl*C)n@6}-*Q7vWn}IVd)> zB`-THMpEw^*Wh#TyEE8%g6x~)Qk9EWLmqvl9W3kkmWEDK5>j=GR|%qi$4D$KQZrZ3 zb$sjRny;p?UP}5o^e8{hHBp2>fNBu0iQuJCE`;bA&1UT6$yj4g= zIF`f^gAxhe6uP2zEO+PzB*U*V!ePW1;)?u@*RpmO^XCR~RIkCRSLBp}Dbm}`3NR<7 zt(kAn>St33E4meEb#&*o@O!f%gedy9HM-ACh=6j3)M6V}cNSXIZD@@JSP0mV*N?67 zZ5w@ct6<^V#ZtGX^sypY1}A5QXiK@ELU^JeXQn~MLc`s!eT5O?C|(P34tcDiaS5wc9Ak3VZQ}!)1nL$WeD`U6d+0VVFBVf4%%i+pIGpbk84n7LoA6hXrJ>0~>cz|G zl^ZJ#rNThPqhBd4PGX0;AJ3}{`z92>lffAFzN3Qn1^}6@catsw0pDlmuAPldaBCiC z`>v&K2+rteN^)5=Mm&WimGZ|I<=|MW!+L7kF@=OJE_=D<=lWJDWt=PL%DCOL=bcMO z+)MEK#eKUs_brfUJO|7gt;=HtskbYRh+Dx|!sHU{Oa07ao68cm30{IdulC|UXL>7% z{WJuKEU7;jX;2J8NagJ?5jmymTS9-wSi~iU+%<<=wEUFy^=%-V4bIeJWd`SsRh0p2 z!|haJEuP0J8zM-%glE!f7p1$)gjpi`Ymt^Ly}(F zPWe-oWISf6A_h$-Oa}_^+e6FmELpQ@(3*r>w@48G>6!!@6XoO?=aLZR5g8fw_?rCa zst%rK57?lIEvk#n9at|_sw&maIL7v5ucEvhWtgG-C**j4-`UCg>~i<_^lo=A7u!{z zL_$5ef>5uv?rp2Y4kH0(m?c#awqr%kbO-h111T3O2$K{u7KAdK!!q7}7@%XHuVg{DeeeeA< zNJR4P3RN)uRmp6zkL4?1kbv#mPS%tukI>4(>M@Mc#QdhE^dt zw!x5c#-UWm`)_WFz>QG$-@m>v&@F$hGyCgSLQOcidwbi^MIUzqoanXQ1A>U|hE~fm3qO-mtb@0F z#RuJ<-0Gpk7X_=YV?$E5a${u*8p}T6{%Rag3AK37F(p|qBK(F?5VAcVAWLv#b0b(+ zMZL3^1ETUy8fe0t(VjOa}bjLW{jJkl(X`jmH7t zd&apo_&mXM7;baqbdBp&HxNMio#pANWD9}ig0%Y}PTxR&?>fWJ;Hq{|7Wh-~k^IM^ zNWREUk6Et&t8UvU-7w8LYjUI}S)$AHYWHjjr3q9)Cpsk;-n#(W{n_+jM3LTEhaRDulB z5|%U~H58O=wskl2?4uVeQj#RGXXtT^N5Hd_cB5tPw3|E|M<-S2?@Ty~Yd9dEg*AXa z1aTu7|7jMBUnX2byMG1vE5jTj;u{|z@hU1r{-0x|3BUU0WQVNe(`Swa$rw)7XGdsm zZ>up_Tf4_7?MhYK7OZKd0C{>YcZP;A%9egh)GYO5NuF{Jvg;*pah|Rms`JzQ-(vBG zFQ&I_+=#P@{+bncaFbpmjl{^|Amhebr6}WJfQuxX>^E&Ioy@@d!yDyJt5Hx2)5m*v zLmp>JRrH*Uu7Z_J&4+$k4#H+idMfoioc;BU5qxNfo4{V;avX+{NV-S1$*B1?CFy?RaV2OkUz=9rgrfGm`H`3Dc34}UAs0bcE93XTo%o8c^W5JKicJj4^>?h zI)YcGz&(NWC*Yo*g^ek2Ysm{KbM+NkKK8z^Qtuvz9Ib!cBd$9_edS5h#UHL+vFlCd zAC!I_EgrU+1Hr8<>{c|=fC^GIMWhL`d{ifBy= z%gE^wTDP%=g4?2{k4>*`^*k48UQrS6@t;6hClWE-1!W+oP3l8w$Fb0$A-hIJQ(MG%aw_y3+IDYqmB6kmKz`f zyR%-hR8sZ^{BN1h>cJ!(+{W`@VlSuk3|7a11VZxtoyjJc0aHyu*v)|b>!WXWoPZSI zTB_gPA3Q661J$G+_eriMvo&+Cbcl60`5u5RAI`#HtAC6`fYT#zwNB|P$%C=uort|0 z;tY_FFmJU3NJnji4`h!wo4aiFaB?~wLw=STHVT^-MNiLH0k_j4VoSCCG< z7taI8Uaq>~f^Mq-M4ghyzMa)Ba&neFNx~aO`24b|q_oe00nK~If*Al0Sp1$sSgN$U z<*OLRI5)Fx8191L3W$KvUriiaF4J0;$d16&uc>-ELl=o_(tp~#qL12yRD)P+(Z5cv z-xuL$&a!cuN8hvu$abliC|Lsc)N7d{51H>wW@%Ws zcA*keY)IP$fbAJPC%$$5ba7uxwXLh)gE40zI@7e+-n-_C=wDO%s=sJ6CPj>1LnmeO zP&4I=IFCUIbw7MmIj#xnO>#RztT#Ct0Ccu~@uhTS@f>pY(3Bsg=PYd}Exd*2z+8@I z`3g**UXIwha7@oX?oIV#m+wKQ>#fuajs@_xw*N`3((rwhxA;1zH}VsQG>cNZZKbB& z$S9P)YwNU0yE=|Z;pbW!cO!l7Xc?#d#G~_$QMW*cph)M7i-(7w2eqk*VV((5-uF)4 
zm&f6y>wb}hc>8tUB;o_xx_CHBZ`8EM&J*%m!^@&1EJh>ijSiez!=J|2!2`B|ozyh< zrCC+AQkOYoWSh_8;V1~@(^&J}VQrlv1hA!uMb?H?Da-z5c6-!dpboxzgKX zYsr^}i&n+&R`U#@WjR*W$;InYhtmD;wWiyMqT@p=td3eHX-d^EwQ+V!66Oc`uA9H- zkTZS`&=Y8d$48WDwMg@dL!hX=&@mvcAu}8@fC^o0YBlZh94a&P{_qM^vsc$79puel znevTq>fh3DSlCmfp5^_cPr)#+G$<7QrgiRV!1bO5b5Bs+c=Rk11FoliTOq#&y}W)Z zMS2V5zim*%-Q9k9)-4eJrwGT=I{4l@=0Rj*jdVGQ}Nu?QJuPsJwskq zm=~g5VKaPP1TBVk#`La$N+*X>(tI{Os`3!pE0V14hfc)J#7rZt} zCUr$k&U-NBTKUNO0Pdm!Uw&LvZ~cBVk)fZ`)zPc>GCXaJ$8h`Y8vsYaI|Olr*Erq( z*Z()a?lF;N0Wt-nw10(h>8n%#Gz>pz7tlCuFeGN+xPJy!Yk|F%YiknUd*Wc?VP5cYxi2T^``$>FVDvpma)DtH<(XIlN8#Qtn*~=ygB1mi|IRZIJ=EM6}&R zS3GEgAoRY`&&C~E5JUJlND;lKG?OBzV+{Yl*8Q|4@N@!Z3B!6*iL^X<1X0|x;o4HZ z*E$1XR>6Qx$shkS@GG%Z0tba*k$+zLI3M=yQULHBa5+f&szw92<)Fsv-j;8ZOJlMqs6z*7Gs zZbFfH*6{UHv-+{a%@|0-Ix(#70Jr!0Sm#%*zrhNx0i_;b2XiX1&3!s>IknFCy*mFfC7iS@fL!nsSk75DdC`jlr*^sET9s)dc1ky3XT#pY+7u36E{=C%v z#qWck!P9;iV#oEt-&gsi1w5FfXIaP@pHw4L;n(lx!h7g$ms@>9%$eN7dC&CZe|oOR zMmV|UOrJ!)IeCRBMe`2#);qInIXk)UTv`O6)s^!P2J$!IJLqKT`Z=prI0< zlvJAX8DD*^4}MJ&bIRcZC;Zr-Q!i&qK!m?n)NjO+EQC^ahXdET$QMZ>>u&~`5DeMn z@J}SS82r;Zor8guNZ4NU_D10Io1GhP;Y{1M&+>hZ`4sqCZ|t}SuBE}IL|M8mYrUl> z94eOSye*9NE(`HVm0b8hCmeEgT;CKk%&2oiOQ_)y=X67>4-%Xd6OI8&@L(3wb7EwPVe>TnNB-) zO5z=zTO;go_iTx|bjYc`%245V+6ugZ|EgP?|9xOf`;f*tti6Jx9eZ`4cT?L2^_T{v z3tVbDi+(#epG|k%h|7N)QTb50pfR!8oA*rRXyQB%^(H|@Ilc@#>y-DEI6V^{Q?2R5 zyy0=tz7vzS*TS{b@(E5tD8lDaa3sfOB&zHq<`(685j2BR_v|;JauC(*b+O0fHbpjr z$~M&KKCx5vBy%0C(w%gGB;_ow(gTDpJ$!Et%vX9$p1iJMg-<%{{FMgl6N#`gQp?yU zq1nsh6a_&2sf+qgws{)3j14xc*J%Q;T{xbBmy_*gfW*wAl6vmlHvsf>sDdy^$<#k@ zdcPK?o=6QfqS~G%M%G~ng(K@k92=n(`hthKZCm3HM+spU7+?xu)abSa%2m_D+`RHQ9EGm z%=w!d>jHI+=BzmpH|To#47Tz!kDon_i~A<=MGC48I??x!n%21jwR&M?55(;{=aRwDneUy{P+_3)nN*b$`q6sGZ0_|$3`R|E02db zpM015RNRg8h-QLQJmfh{apiEi8_BXsl^Z9&WufCg+G9JAW)l)lyLzndUb#H)53U`T zX+M>gwdEB~yRxs{IW(>PJ@}rqh^>(w0ISFQy+$CAkwmoYIKj6j0r-|#ULMc&(>fVm z=fauAoJJpVYu20-$jgR$-Z6+WS!AB4GzG;YwBIl=l`A&|wJi9DR`g@eiyQe6TpAB) zcMtA*H-AgWReH<6FVTMn|G5JamgYRYKu;XgtZf>gLn%LgI zf))tMYpmi*4J-7S+Zx1Hs!qmbM7a~c%HBFA!mxd~KJAzYaUFmah+vlTz;`h(JKNMB zliBJLAE3qOnS2TL7x%KVTVR&ztmiFwsoL#rMd_{mzNBLsc!Z4Bv+bUrEMx~C&@8$r z#gtgMx2Cnmu&+c?b(N*QZ)?+-LOU1kwvGgmt7E@mXb^LH*_pZYo3Ae5jnh)Sj+**_B1&ZW85k38A?~yqO%C{-2Y%1h@wa*gfj$iq1^qBz8Fs^E=Vi0O z8a{n~%mu+{Qh(@7^RkIos@j>8lL%6E87jgF%^$8i@d0l4GWUSB z`&MOawh~EzRwyn`b?Q7roz%?YS5N!Q9(;od(5%%;j-dnND~^9Xi+vZnPJ;B1fYW@X zUT@a6^fQ{=H`|3HDvq=q5ngrdDgB$Tqf`@XZroAdTU5u2J7T||LBVkHTvWoUVq4NO z&CyP&l09zz2uC<1VO&%V<5Mv2banCZaeMw-}Kqj>s9Mrr=8iYA42hiT1pym{ZOk7SQA0 zlXIL(S@ImeNm9uk6}H8AwCluVnk3F5(hzD)m`H=OJ9mc>Nko<>{iAA~YuG zd$WV&wco*KwLPpL-`#v!qv;t+4$k+vZcRZ~Yw7ST;(}fvj2ef}$_v z>HXWCe5)KusZ3prKrCAH6$swEyfYE#@hT({9+D{87+;NTA8L~`7epepD7`O|)M4`; zuYe8WBvowtTL;ZU9cw>w0=O<{_&Ite>MW9cuXIN5_I2aaoKihG-0Hi6dkdJG%QKNV ztSez}aBd3hl|_M$3E915>EEe-;6Ea>NI$@-tWk}jk&sN|FLJxI>}n9UZ-w{_6bxk% z6CYD(MKQ6euD$B&0^Hqy>uD2~Dc%0<&lU&D-(%md2=FkU`(;g7^jb#V1RaKP5ZhSz z@%c2_TTDysi)CAhrc8lyG|_2H$O&{Tr-< z3DKhb@y{7VKMDK_o@p(?{^k84MO7{$Qxf3s^G4bo@qb5f`g_+xWJY9Z6WFT-lMj&?h-u=ERvRe$3@e*A;hkVN`pI zx5au5`o{hQw2lpie~=KLvvXY>2Y&h##~=a!pReGRlst%lTu!JQz|!lkSFAW|tn2Qd z*)QlG&IJj&<@ZR2v20L`@s!n24za<2CkTA{3^u|SgPssnG32ct-{3DItod#Ac;j=&+QzOTz7G`fB3wVDe&<9&Jo9b zgQ>0j!be8WI!QaHNMMPs8|NGIGw-AKgbv$eH!qK zhlkfTzW~3_?R5pmHNWtm6j~oh!sSdbooDq3oBzk!_USU6g;x3>03W!L*`n1UqwM#pB^t?1>h#M!yvs2}66jlNJO zJ{z~`&-4k1t#}yBmBW9=+@zBk+&M5xC2h;ork6FuZ4O0B?BS_HvW4oD-3fXR%7H?3e7Irkvbzs<(wP7xka}j8X64? zCC=KKLC_yLtWEp(+_!>_4vWhPB-w@O(AZ&2u|MC5s_5KuXv>5raG4%ec~xdAlzpgi zsON5KmwB!rwnChD#m}UBfAHvOOwhDiy)M5TMV5R}gZYNbook(U^{Y1onD2FED&^|? 
zdHQi6uaF5Ra7BIf%lvd2c~a}p>?AyY+cF}^xu{cs(WPsO0})JPHRh^z`tZAznq-Xz zTkkm0e>OVr@(+n&Co1s$tPGMmC?t7;oklZW_^yLq;BoN-0u9d(~~3 zzhB`l{17|IEsSjWa{i!g5Xri?;*AI~Os$5~saToHRX(*M7yD`-7H_pgXhxz4{qr5R zPKMPq9cRI6LP;A^+o_qdC?#(KCs?Zbt*RF?txHxOVZW~EDMO&RWbWUyi#m3gXH6yj z;)3BNahQgp954EPj(9XS-kR@!xzz7DmME)v&LduSQND?o9Lr zEk-l_g%L($@K)!i^H}fb7SY|!2V-L0-Tj3+u$8TJw~+pT7jtMK;CLhX4E2ag-tdLfzM>T8hm?yb~=-!@<17k>5WGw1(Xp>+#kU_wclrVYfawL9*{&y zdud+i_6|^pS#(8%1r?l#0RPTh6gh6etITQA$BfkU+Z(>TZ|;BKHG0NP81b-J@t-`a zdb}F}cpoB5TAhgY%#ho`1kxhQXd%h|82eJ1&h2C>NLnBp%G%RQDWy?>^H)`^)qIRk z67uAeT1@AVKX^A{GAL-z1jR%I$3fuTvV9U)7fT~T?b!4Ysf@`()EIM7wyA)1_oL;3R15a-Ph$0|B(tY5^dQa5YidSQCMp`P-x z7IElxj%GlG&YbqZmsN`Ub;Ptt(M_$j>I+0i+8fRbfP)lSi>|5v;=~LcQ*_x zkXb*=mV}aZsSJt|TJ^zyAR9Y2l+Z|N^4v_t8L?c8dc?Spl0KulxdNan-3F;w5Z(Pe z94zU1FZ$YLd1t-*uZlHdS9PHU*(#Eq>_sVy#00J+857=o>b`6?+YNr44uhfbGs($t zi404ObwexlGjOk`R@F1h>QPk)`h*GyIj1C?IPj|tFQOddakF2adGkB;Gp|K-e%NKb ze@c0r*Lrh>p2Lih%G}hJcNVGRpcZeFD%qN@n@1ATCA(MDYJ-72YHwh@bX&9%&=b*e z1BiGeqXVtEkXr@qgchB5$6pV}IK!{>>kHpqq_Yz%4S)*|StEL3Vm7E0zaCrF$d>7biCs73YItL;Ks) ztVs~kOnP2z+1KTIM+L_2hXT;nhTSoFW$TysZS5{Rq*|d9=hj<@-|HcPW;R>~W1dGe zZGJ0LC&SBs`3shM;3QaLR?A;fttJ!CR03eP9dQeeif6I4mk1>{X!VwKbq^>>nL2+J zs5`haA)eusr1X5Wks+mCF;gdN?lQ|u`-(~K=PA6xmevMJ{wPoquR_KBc8IEkM;dF%MgxbvCr2M?;FXyRWFXzq+)fyxvN3Sn&t(m!Sg!qm!3PE3nHShn>&`rOxM_p%Ez*OlvByZ zqncYr9;6H7hBwW*G}((;&JjgHJF<}M$CVZrtvIoK94Ypx^4IQG>10DRFd_!CPNEVxwN^2hi>o}NaU zjlRpFUD5D?p_@eZ_HW&lxHoe~c3Ab;*EzHseq^x9E)!q5Y79l}IVaC>I3sW9Mm~Ae zX=7y%ohZ69Gio4#C++|}e5+@+Z&i|bUJvS=DZ!brjOR1~u)3|DrIIkco!@PXy1pT_%%k zO{w{Zkcz(MI@hUx_%yaxNG?#6Hd%=QHnYviniPdkoB9!RU`YM~q3x%MPswL)Ea+@~ zGkNwa`hCl0RBNDXHNG11@r+2Pg&*4i0@!&NnHOEhRi)Z6F)PHEUaH7Nu4Z5>M=tf( zVH8bGa2VodNk4DoZEjb7WnI`PslIE# zeIf9Cbe4)gdz|sGt13VJu4zm$uwj20qfE*Y0x@w^pea{usyJs=+lT;+_N9%=@E?CY z6Gfm0dQrctqai1}Z%l{Tzh5oFH69s*3z!V*uj3fjHa@Rbb?;68dhlJ$bgxo1&1Omi zNRc4je){+mOjjEAf72gRRqi?9hIWkvN0vq~f1#fpOef6wKcK&o^wqfOgOV()u8WW@ zFgIw4N-XHt828+IzQ2o++Neer5Fj0DXwPs0A6&>u(v!gz3bAn7oT_P54Q=f$375Iq z6$<^`P!mUI`)=K%RO!{Zm0a;Kpq-)EUhWUo4->H~!X|m9QsmM!y>_aDqjwNSC_lxR zOkC4c-@q0=mHSZ*3jElB*BbHTmgy8#wj^w8+=T1YdNrhQB&(OI6N?TfZARt}IJHq4%x>pMz+E>6B6nta09K7*{x1-M zr}^LD>9v85d)QJM(o`PZFSsfFKOuyf6Y`_GtF8|Gd-4JF!bsYiKPl->eAxWn^ z+wM17|P%#v* z-Ka+IY1SsvWYqH@`&xvNX6Nesjv7@e`E{%nPUGNamspgJE@XzDG$9u8<~2rn#pgSn zMGE{$Qwg&w%I}o1gU%C7IIT80)Wp~)XS+hyl865VRlyPl;&lQGfhn_np4UK=bL>5^ z(`E1a*2WOfK!$xEJOF2%v4j68R8i0u;g+An80$PQtCCLJ?~YW+Ia^oH98!sJUZ4Bq z;j(j}ya8J~jCqrZ+_!RY%F^FV!#XKSjzyf) z3wAv2z9NglsL^)YzvpnlARjNa2=B{HsYO$kOe$H98vL2K89x2#Wm$wFuEN0mE~Fz9 zY;{h74#EvK*iSS=g}~cJ^korGwHRs}N{6Ygow9v-utH&`>C=hE?xRz53mI(YF^wE? 
ziI!v3HXjp{O{IoLOkzh#YKIXVBA>bqwd;_wv*nk2XV$uBCx;cic;f#yN0{jz8WbQ$
z1Uq!;^!NhaIzRVr_czM$L)qu_@>Mk3tTYG1;(jf
z;}+tsORzQv7&JCUsQ?$qzcc~d2zvr;2>j;|kSr$?>wZ)di+}BRrg(bif(Dt4H@GRi
z6oJxkDFTx8m*`6nxUp`&l?*?DzvDuFzS~9hv9qzg32sPlL3wt5mfEeHsw2@H+bB_5
XFHveL0h4^5-Uc%~M%V#fAOQalss_m}

literal 0
HcmV?d00001

diff --git a/charts/lighthouse/values.yaml b/charts/lighthouse/values.yaml
index 54f43949c..77edd4130 100644
--- a/charts/lighthouse/values.yaml
+++ b/charts/lighthouse/values.yaml
@@ -1,80 +1,57 @@
 git:
   # git.kind -- Git SCM provider (`github`, `gitlab`, `stash`)
   kind: github
-
   # git.server -- Git server URL
   server: ""
-
 # lighthouseJobNamespace -- Namespace where `LighthouseJob`s and `Pod`s are created
 # @default -- Deployment namespace
 lighthouseJobNamespace: ""
-
 githubApp:
   # githubApp.enabled -- Enables GitHub app authentication
   enabled: false
-
   # githubApp.username -- GitHub app user name
-  username: "jenkins-x[bot]"
-
+  username: "jenkins-x[bot]"
 # user -- Git user name (used when GitHub app authentication is not enabled)
 user: ""
-
 # oauthToken -- Git token (used when GitHub app authentication is not enabled)
 oauthToken: ""
-
 # oauthSecretName -- Existing Git token secret
 oauthSecretName: ""
-
 # oauthTokenVolumeMount -- Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled)
 oauthTokenVolumeMount:
   enabled: false
-
 # hmacToken -- Secret used for webhooks
 hmacToken: ""
-
 # hmacSecretName -- Existing hmac secret to use for webhooks
 hmacSecretName: ""
-
 # hmacTokenEnabled -- Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud
 hmacTokenEnabled: true
-
 # hmacTokenVolumeMount -- Mount hmac token as a volume instead of using an environment variable Secret reference
 hmacTokenVolumeMount:
   enabled: false
-
 # logFormat -- Log format either json or stackdriver
 logFormat: "json"
-
 # logService -- The name of the service registered with logging
 logService: ""
-
 # logStackSkip -- Comma separated stack frames to skip from the log
 logStackSkip: ""
-
 # scope -- limit permissions to namespace privileges
 scope: "cluster"
-
 cluster:
   crds:
     # cluster.crds.create -- Create custom resource definitions
     create: true
-
 image:
   # image.parentRepository -- Docker registry to pull images from
   parentRepository: ghcr.io/jenkins-x
-
   # image.tag -- Docker images tag
   # the following tag is latest on the main branch, it's a specific version on a git tag
-  tag: latest
-
+  tag: 1.16.3
   # image.pullPolicy -- Image pull policy
   pullPolicy: IfNotPresent
-
 # env -- Environment variables
 env:
   JX_DEFAULT_IMAGE: ""
-
-
 externalPlugins:
   - name: cd-indicators
     requiredResources:
@@ -86,392 +63,287 @@ externalPlugins:
       - kind: Service
         namespace: jx
         name: lighthouse-webui-plugin
-
 gcJobs:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # gcJobs.maxAge -- Max age from which `LighthouseJob`s will be deleted
   maxAge: 168h
-
   # gcJobs.schedule -- Cron expression to periodically delete `LighthouseJob`s
   schedule: "0/30 * * * *"
-
   # gcJobs.failedJobsHistoryLimit -- Drives the failed jobs history limit
   failedJobsHistoryLimit: 1
-
   # gcJobs.successfulJobsHistoryLimit -- Drives the successful jobs history limit
   successfulJobsHistoryLimit: 3
-
   # gcJobs.concurrencyPolicy -- Drives the job's concurrency policy
   concurrencyPolicy: Forbid
-
   # gcJobs.backoffLimit -- Drives the job's backoff limit
   backoffLimit: 6
-
   image:
     # gcJobs.image.repository -- Template for computing the gc job docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"
-
     # gcJobs.image.tag -- Template for computing the gc job docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # gcJobs.image.pullPolicy -- Template for computing the gc job docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
 webhooks:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # webhooks.replicaCount -- Number of replicas
   replicaCount: 1
-
   # webhooks.terminationGracePeriodSeconds -- Termination grace period for webhooks pods
   terminationGracePeriodSeconds: 180
-
   image:
     # webhooks.image.repository -- Template for computing the webhooks controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-webhooks"
-
     # webhooks.image.tag -- Template for computing the webhooks controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # webhooks.image.pullPolicy -- Template for computing the webhooks controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
-
   # webhooks.labels -- allow optional labels to be added to the webhook deployment
   labels: {}
   podLabels: {}
-
   # webhooks.podAnnotations -- Annotations applied to the webhooks pods
   podAnnotations: {}
-
   # webhooks.serviceName -- Allows overriding the service name; this is here for compatibility reasons and regular users should clear this out
   serviceName: hook
-
   # webhooks.service -- Service settings for the webhooks controller
   service:
     type: ClusterIP
     externalPort: 80
     internalPort: 8080
     annotations: {}
-
   resources:
     # webhooks.resources.limits -- Resource limits applied to the webhooks pods
     limits:
       cpu: 100m
       # may require more memory to perform the initial 'git clone' cmd for big repositories
      memory: 512Mi
-
     # webhooks.resources.requests -- Resource requests applied to the webhooks pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # webhooks.probe -- Liveness and readiness probes settings
   probe:
     path: /
-
   # webhooks.livenessProbe -- Liveness probe configuration
   livenessProbe:
     initialDelaySeconds: 60
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # webhooks.readinessProbe -- Readiness probe configuration
   readinessProbe:
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # webhooks.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods
   nodeSelector: {}
-
   # webhooks.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods
   affinity: {}
-
   # webhooks.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods
   tolerations: []
-
   ingress:
     # webhooks.ingress.enabled -- Enable webhooks ingress
     enabled: false
-
     # webhooks.ingress.annotations -- Webhooks ingress annotations
     annotations: {}
-
     # webhooks.ingress.ingressClassName -- Webhooks ingress ingressClassName
     ingressClassName: null
-
     # webhooks.ingress.hosts -- Webhooks ingress host names
     hosts: []
-
     tls:
       # webhooks.ingress.tls.enabled -- Enable webhooks ingress tls
       enabled: false
       # webhooks.ingress.tls.secretName -- Specify webhooks ingress tls secretName
       secretName: ""
-
   # webhooks.customDeploymentTriggerCommand -- deployments can configure the ability to allow custom lighthouse triggers
   # using their own unique chat prefix, for example extending the default `/test` trigger prefix lets them specify
   # `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing
   customDeploymentTriggerCommand: ""
-
 foghorn:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # foghorn.replicaCount -- Number of replicas
   replicaCount: 1
-
   # foghorn.terminationGracePeriodSeconds -- Termination grace period for foghorn pods
   terminationGracePeriodSeconds: 180
-
   image:
     # foghorn.image.repository -- Template for computing the foghorn controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-foghorn"
-
     # foghorn.image.tag -- Template for computing the foghorn controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # foghorn.image.pullPolicy -- Template for computing the foghorn controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   resources:
     # foghorn.resources.limits -- Resource limits applied to the foghorn pods
     limits:
       cpu: 100m
       memory: 256Mi
-
     # foghorn.resources.requests -- Resource requests applied to the foghorn pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # foghorn.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods
   nodeSelector: {}
-
   # foghorn.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods
   affinity: {}
-
   # foghorn.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods
   tolerations: []
-
-
 tektoncontroller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # tektoncontroller.dashboardURL -- the dashboard URL (e.g. Tekton dashboard)
   dashboardURL: ''
   # tektoncontroller.dashboardTemplate -- Go template expression for URLs in the dashboard if not using Tekton dashboard
   dashboardTemplate: ''
-
   # tektoncontroller.replicaCount -- Number of replicas
   replicaCount: 1
-
   # tektoncontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods
   terminationGracePeriodSeconds: 180
-
   image:
     # tektoncontroller.image.repository -- Template for computing the tekton controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"
-
     # tektoncontroller.image.tag -- Template for computing the tekton controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # tektoncontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # tektoncontroller.podAnnotations -- Annotations applied to the tekton controller pods
   podAnnotations: {}
-
   # tektoncontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods
   nodeSelector: {}
-
   # tektoncontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods
   affinity: {}
-
   # tektoncontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods
   tolerations: []
-
   resources:
     # tektoncontroller.resources.limits -- Resource limits applied to the tekton controller pods
     limits:
       cpu: 100m
       memory: 256Mi
-
     # tektoncontroller.resources.requests -- Resource requests applied to the tekton controller pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # tektoncontroller.service -- Service settings for the tekton controller
   service:
     annotations: {}
-
 jenkinscontroller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # jenkinscontroller.jenkinsURL -- The URL of the Jenkins instance
   jenkinsURL:
-
   # jenkinscontroller.jenkinsUser -- The username for the Jenkins user
   jenkinsUser:
-
   # jenkinscontroller.jenkinsToken -- The token for authenticating the Jenkins user
   jenkinsToken:
-
   # jenkinscontroller.terminationGracePeriodSeconds -- Termination grace period for Jenkins controller pods
   terminationGracePeriodSeconds: 180
-
   image:
     # jenkinscontroller.image.repository -- Template for computing the Jenkins controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"
-
     # jenkinscontroller.image.tag -- Template for computing the Jenkins controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # jenkinscontroller.image.pullPolicy -- Template for computing the Jenkins controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # jenkinscontroller.podAnnotations -- Annotations applied to the Jenkins controller pods
   podAnnotations: {}
-
   # jenkinscontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the Jenkins controller pods
   nodeSelector: {}
-
   # jenkinscontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the Jenkins controller pods
   affinity: {}
-
   # jenkinscontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the Jenkins controller pods
   tolerations: []
-
   resources:
     # jenkinscontroller.resources.limits -- Resource limits applied to the Jenkins controller pods
     limits:
       cpu: 100m
       memory: 256Mi
-
     # jenkinscontroller.resources.requests -- Resource requests applied to the Jenkins controller pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # jenkinscontroller.service -- Service settings for the Jenkins controller
   service:
     annotations: {}
-
 keeper:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # keeper.statusContextLabel -- Label used to report status to git provider
   statusContextLabel: "Lighthouse Merge Status"
-
   # keeper.replicaCount -- Number of replicas
   replicaCount: 1
-
   # keeper.terminationGracePeriodSeconds -- Termination grace period for keeper pods
   terminationGracePeriodSeconds: 30
-
   image:
     # keeper.image.repository -- Template for computing the keeper controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-keeper"
-
     # keeper.image.tag -- Template for computing the keeper controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # keeper.image.pullPolicy -- Template for computing the keeper controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # keeper.podAnnotations -- Annotations applied to the keeper pods
   podAnnotations: {}
-
   # keeper.env -- Lets you define keeper specific environment variables
   env: {}
-
   # keeper.service -- Service settings for the keeper controller
   service:
     type: ClusterIP
     externalPort: 80
     internalPort: 8888
-
   resources:
     # keeper.resources.limits -- Resource limits applied to the keeper pods
     limits:
       cpu: 400m
       memory: 512Mi
-
     # keeper.resources.requests -- Resource requests applied to the keeper pods
     requests:
       cpu: 100m
       memory: 128Mi
-
   # keeper.probe -- Liveness and readiness probes settings
   probe:
     path: /
-
   # keeper.livenessProbe -- Liveness probe configuration
   livenessProbe:
     initialDelaySeconds: 120
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # keeper.readinessProbe -- Readiness probe configuration
   readinessProbe:
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   datadog:
     # keeper.datadog.enabled -- Enables datadog
     enabled: "true"
-
   # keeper.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods
   nodeSelector: {}
-
   # keeper.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods
   affinity: {}
-
   # keeper.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods
   tolerations: []
-
 poller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # poller.enabled -- Whether to enable or disable the poller component
   enabled: false
-
   # poller.replicaCount -- Number of replicas
   replicaCount: 1
-
   # poller.terminationGracePeriodSeconds -- Termination grace period for poller pods
   terminationGracePeriodSeconds: 30
-
   image:
     # poller.image.repository -- Template for computing the poller controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-poller"
-
     # poller.image.tag -- Template for computing the poller controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # poller.image.pullPolicy -- Template for computing the poller controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # poller.podAnnotations -- Annotations applied to the poller pods
   podAnnotations: {}
-
   # poller.env -- Lets you define poller specific environment variables
   env:
     # poller.env.POLL_PERIOD the default time period between polling releases and pull requests
     POLL_PERIOD: 20s
-
     # poller.env.POLL_RELEASE_PERIOD the time period between polling releases
     # POLL_RELEASE_PERIOD: 20s
@@ -480,77 +352,58 @@ poller:
     # poller.env.POLL_HOOK_ENDPOINT the hook service endpoint to post webhooks to
     POLL_HOOK_ENDPOINT: http://hook/hook/poll
-
   # poller.contextMatchPattern -- Regex pattern to use to match commit status context
   contextMatchPattern: ""
-
   # poller.requireReleaseSuccess -- Keep polling releases until the most recent commit status is successful
   requireReleaseSuccess: false
-
   resources:
     # poller.resources.limits -- Resource limits applied to the poller pods
     limits:
       cpu: 400m
       memory: 512Mi
-
     # poller.resources.requests -- Resource requests applied to the poller pods
     requests:
       cpu: 100m
       memory: 128Mi
-
   # poller.probe -- Liveness and readiness probes settings
   probe:
     path: /
-
   # poller.internalPort -- The internal port used to view metrics etc
   internalPort: 8888
-
   # poller.livenessProbe -- Liveness probe configuration
   livenessProbe:
     initialDelaySeconds: 120
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # poller.readinessProbe -- Readiness probe configuration
   readinessProbe:
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   datadog:
     # poller.datadog.enabled -- Enables datadog
     enabled: "true"
-
   # poller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods
   nodeSelector: {}
-
   # poller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods
   affinity: {}
-
   # poller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods
   tolerations: []
-
 engines:
   # engines.jx -- Enables the jx engine
   jx: true
-
   # engines.tekton -- Enables the tekton engine
   tekton: false
-
   # engines.jenkins -- Enables the Jenkins engine
   jenkins: false
-
 configMaps:
   # configMaps.create -- Enables creation of `config.yaml` and `plugins.yaml` config maps
   create: false
-
   # configMaps.config -- Raw `config.yaml` content
   config: null
-
   # configMaps.plugins -- Raw `plugins.yaml` content
   plugins: null
-
   # configMaps.configUpdater -- Settings used to configure the `config-updater` plugin
   configUpdater:
     orgAndRepo: ""
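
The values above are intended to be overridden per installation rather than edited in the chart. As an illustration only, a minimal user-supplied override file for this chart version might look like the sketch below; the file name, host name, ingress class, user and token values are placeholders, and only keys documented in values.yaml are used:

```yaml
# my-values.yaml -- example overrides for lighthouse 1.16.3 (all concrete values are placeholders)
git:
  # Git SCM provider (`github`, `gitlab`, `stash`)
  kind: github
  # Git server URL
  server: ""

# Secret used for webhooks (hmacTokenEnabled defaults to true)
hmacToken: "replace-with-a-random-string"

# Git user name and token, used when githubApp.enabled is false
user: "my-bot-user"
oauthToken: "replace-with-a-git-token"

# enable the pipeline engine matching your setup
engines:
  jx: true
  tekton: false
  jenkins: false

webhooks:
  ingress:
    # expose the hook service through an ingress
    enabled: true
    ingressClassName: nginx
    hosts:
      - hook.example.com
```

With such a file, an upgrade to this version would then be along the lines of `helm upgrade my-lighthouse <chart-repo>/lighthouse --version 1.16.3 --namespace lighthouse -f my-values.yaml`, where `<chart-repo>`, the release name and the namespace are whatever was configured locally at install time.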