From ca7387112bdeccb328bb131130ede18b1eb109b7 Mon Sep 17 00:00:00 2001 From: jenkins-x-bot Date: Wed, 5 Jun 2024 13:07:20 +0000 Subject: [PATCH] chore: release 1.17.4 --- changelog.md | 7 + charts/lighthouse/Chart.yaml | 4 +- charts/lighthouse/README.md | 308 ++++++++++++------------ charts/lighthouse/lighthouse-1.17.4.tgz | Bin 0 -> 15831 bytes charts/lighthouse/values.yaml | 151 +----------- 5 files changed, 167 insertions(+), 303 deletions(-) create mode 100644 changelog.md create mode 100644 charts/lighthouse/lighthouse-1.17.4.tgz diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..b5006069a --- /dev/null +++ b/changelog.md @@ -0,0 +1,7 @@ + +## Changes in version 1.17.4 + +### Chores + +* upgrade to go 1.22.3 (Mårten Svantesson) +* deps: upgrade jenkins-x/go-scm to version 1.14.36 (jenkins-x-bot) diff --git a/charts/lighthouse/Chart.yaml b/charts/lighthouse/Chart.yaml index edc18364e..3d48d0316 100644 --- a/charts/lighthouse/Chart.yaml +++ b/charts/lighthouse/Chart.yaml @@ -3,6 +3,6 @@ description: | This chart bootstraps installation of [Lighthouse](https://github.com/jenkins-x/lighthouse). 
icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-website/master/images/logo/jenkinsx-icon-color.svg name: lighthouse -version: 0.1.0-SNAPSHOT +version: 1.17.4 home: https://github.com/jenkins-x/lighthouse - +appVersion: 1.17.4 diff --git a/charts/lighthouse/README.md b/charts/lighthouse/README.md index ed4700eef..48370b1c7 100644 --- a/charts/lighthouse/README.md +++ b/charts/lighthouse/README.md @@ -42,157 +42,161 @@ helm uninstall my-lighthouse --namespace lighthouse ## Values -| Key | Type | Description | Default | -| --------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `cluster.crds.create` | bool | Create custom resource definitions | `true` | -| `configMaps.config` | string | Raw `config.yaml` content | `nil` | -| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | -| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | -| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | -| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | -| `engines.jx` | bool | Enables the jx engine | `true` | -| `engines.tekton` | bool | Enables the tekton engine | `false` | -| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | -| `externalPlugins[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].namespace` | string 
| | `"jx"` | -| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | -| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | -| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | -| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | -| `foghorn.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | -| `foghorn.replicaCount` | int | Number of replicas | `1` | -| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | -| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | -| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | -| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | -| `gcJobs.backoffLimit` | int | Set the backoff limit for failed cronJobs | `6` | -| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | 
`"Forbid"` | -| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | -| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | -| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | -| `gcJobs.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | -| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | -| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | -| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | -| `git.server` | string | Git server URL | `""` | -| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | -| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | -| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | -| `hmacToken` | string | Secret used for webhooks | `""` | -| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | -| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | -| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | -| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | -| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | -| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | -| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | -| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | -| `jenkinscontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| 
`jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | -| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | -| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | -| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | -| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | -| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | -| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | -| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `keeper.readinessProbe` | object | 
Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.replicaCount` | int | Number of replicas | `1` | -| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | -| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | -| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | -| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | -| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | -| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | -| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | -| `logFormat` | string | Log format either json or stackdriver | `"json"` | -| `logService` | string | The name of the service registered with logging | `""` | -| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | -| `oauthSecretName` | string | Existing Git token secret | `""` | -| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | -| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | -| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | -| `poller.datadog.enabled` | string | Enables datadog | `"true"` | -| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | -| `poller.env` | object | Lets you define poller specific environment variables | 
`{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | -| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | -| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | -| `poller.internalPort` | int | | `8888` | -| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | -| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | -| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.replicaCount` | int | Number of replicas | `1` | -| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | -| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | -| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | -| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | -| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | 
`[]` | -| `scope` | string | set scope to either `cluster` or `namespace` for permissions | `cluster` | -| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | -| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | -| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | -| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `tektoncontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | -| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace 
period for tekton controller pods | `180` | -| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | -| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | -| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | -| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | -| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | -| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | -| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | -| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | -| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | -| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | -| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.logLevel` | string | The logging level: trace, debug, info, warn, 
panic, fatal | `"info"` | -| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | -| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | -| `webhooks.podLabels` | object | | `{}` | -| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.replicaCount` | int | Number of replicas | `1` | -| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | -| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | -| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | -| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | -| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | -| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `cluster.crds.create` | bool | Create custom resource definitions | `true` | +| `configMaps.config` | string | Raw `config.yaml` content | `nil` | +| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | +| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | +| `configMaps.plugins` | string | Raw `plugins.yaml` 
content | `nil` | +| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | +| `engines.jx` | bool | Enables the jx engine | `true` | +| `engines.tekton` | bool | Enables the tekton engine | `false` | +| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | +| `externalPlugins[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | +| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | +| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | +| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | +| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | +| `foghorn.logLevel` | string | | `"info"` | +| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | +| `foghorn.replicaCount` | int | Number of replicas | `1` | +| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | +| `foghorn.resources.requests` | object | 
Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | +| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | +| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | +| `gcJobs.backoffLimit` | int | Drives the job's backoff limit | `6` | +| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | +| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | +| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | +| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | +| `gcJobs.logLevel` | string | | `"info"` | +| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | +| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | +| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | +| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | +| `git.server` | string | Git server URL | `""` | +| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | +| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | +| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | +| `hmacToken` | string | Secret used for webhooks | `""` | +| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | +| `hmacTokenVolumeMount` | object | Mount hmac token as a volume instead of using an environment variable Secret reference | `{"enabled":false}` | +| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | +| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | +| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | +| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | +| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | +| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | +| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | +| `jenkinscontroller.logLevel` | string | | `"info"` | +| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `jenkinscontroller.resources.limits` | object | Resource limits 
applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | +| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | +| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | +| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | +| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | +| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.logLevel` | string | | `"info"` | +| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | +| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | +| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| 
`keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.replicaCount` | int | Number of replicas | `1` | +| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | +| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | +| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | +| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | +| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | +| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | +| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | +| `logFormat` | string | Log format either json or stackdriver | `"json"` | +| `logService` | string | The name of the service registered with logging | `""` | +| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | +| `oauthSecretName` | string | Existing Git token secret | `""` | +| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | +| `oauthTokenVolumeMount` | object | Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) | `{"enabled":false}` | +| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | +| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | +| 
`poller.datadog.enabled` | string | Enables datadog | `"true"` | +| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | +| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | +| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | +| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | +| `poller.internalPort` | int | | `8888` | +| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.logLevel` | string | | `"info"` | +| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | +| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | +| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.replicaCount` | int | Number of replicas | `1` | +| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | +| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | +| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | +| `poller.terminationGracePeriodSeconds` | int | Termination grace period 
for poller pods | `30` | +| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | +| `scope` | string | limit permissions to namespace privileges | `"cluster"` | +| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | +| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | +| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | +| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `tektoncontroller.logLevel` | string | | `"info"` | +| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | +| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `tektoncontroller.service` | object | Service settings for the tekton controller | 
`{"annotations":{}}` | +| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | +| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | +| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | +| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | +| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | +| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | +| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | +| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | +| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | +| `webhooks.ingress.tls.enabled` | bool | Enable webhooks ingress tls | `false` | +| `webhooks.ingress.tls.secretName` | string | Specify webhooks ingress tls secretName | `""` | +| `webhooks.labels` | object | allow optional labels to 
be added to the webhook deployment | `{}` | +| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.logLevel` | string | | `"info"` | +| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | +| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | +| `webhooks.podLabels` | object | | `{}` | +| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.replicaCount` | int | Number of replicas | `1` | +| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | +| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | +| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | +| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | +| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | +| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | You can look directly at the [values.yaml](./values.yaml) file to look at the options and their default values. 
diff --git a/charts/lighthouse/lighthouse-1.17.4.tgz b/charts/lighthouse/lighthouse-1.17.4.tgz new file mode 100644 index 0000000000000000000000000000000000000000..c7346e286c59dcaff14964b0f14ecd29ecf145fd GIT binary patch literal 15831 zcmYkDV{j(l6R2a`ww-LUv2EM7jm;a|w#|*T;l{RY8ymhizyGbeb?3uW&77J#UrtST zKm9auGzNbH;2g&4Hh$14r?7%?w^Vp8b9SV?QD!4%)Hf< z9r>lq?d(A=f4#bHaLu27)%1epH|k0sXY%+_=2smrJ@DjFpi@6Tuyz@imf1yw&#pJS;-gxXEDMIO`;e=eA&e3m%)I{aEBD5FO`zaYFmBZ>e0l zczX8qJOXwb)c*rG*7v@&eO-Sk)VJ;ItZe~x>wBA;R^IVXYcr5+qj19?9lJ}NG3j4| zDRB07P|Pzzz*(-^j4(>$KNW!LmTgC*$0`KQuh*|bI6roy62ZbkoQ7#*aQa9OSYQf9 zAx2^R1Qf!TZkUI2B<*Ie-ZJ73j7KPwudy+7%(|+>7izrn^RmA-FE)3Js%P?ZZBzUO){}OPBFV_Ic`|p{yIL_F{?% z=L7kt8e-#M5C=yA73*3K# zM0ovNyI|w^7?6klK_W;45JnSmYAPjF&M4y6e=A0i;PzOA8bjj_6{&2VnTWt}#6gDr zVnpKf#~^huAm{vMw0*vQ^pprmy7Z^IDqjZ5FrdCR8>xzMsf&4g~196 zjI7^*Qsw1j&=hUFIy z3!AVBREb?EXne>Fw~;dj>UiKTS_3cEFU~EuX65VIVN37`rq6#9JaSp6N%LU}4L6$eeY`-)x zTyzJdmel!lf}u!Tk^*M57AK61-hw26fE7uKMo^IisXt+0me3J&GC$gRlI(CF)gv%X zNZf$tBn@n`Fdwc|BKClUixSksTz@Cpv&tvYkJcvb7$@MRSTQ7DWl(8UcV_TIDW;H5 zUJE`{b*{Ph>-~Icju|?A1Y7e)kmwJ85;(+`b3+_tx$xyd0-_F*Qgzk^H`gNsHl>;y zjyPa2gX~7`rJMjVE{*Y#EA11{V&W;i>ro!nD2Ph;EOe6ClsYM-2Fw`KYUeW*ArsFI z^e%o519I$R$rT-9zZ55XR$RAF^zZ5p7#BezqZ7P!J0wbJh(jRiA&XTY#xzK?cwK7} zHAT@h#5UnS7Yn}-Pa(JJoTfI@%eV=V1x6&W{u+J^5>iIcf^lRa0PtxwWMY%`eqU&Y z8x712TbbgdH3u)kglwG`=9;4!?I-D_H4XUewY#U!F7*?$W-#g?=>6b~XGSEqJA-#* z24@g=Rj8CXnBizbV8eKsmjXNpZvg1&$vJu#lZhP$G)G2bjECJ)0F=?l;AzrqY^4_O zbyKD#$(u;n3@(d{xod-(8;C!tTLeG)7fL6m8GGZBaNsho%J>u*`0pJs67v`+ts!Qs zI8K8CX$r376dFCYNE2EY7_}by-=H%_&Ls2t>LUGQvEH~6v68j}6DR#^K9N5EvO#Pb{7qfH z9yC|6AF3~QAN2Ia2mSK{L2maKnOG1kDquF`)E>OZWP}=YLLL^O&y}>G3lw(Fbyoo+ z7~e$Dln{p%Q5O_1p!@4tQcs^Vkf1^2YCgT?N!I1q<{wY3k%$=Jy@g^R2Pp;N$ct-O z6@#oy%^5=&d4l>ix3pMn67)`jG>U{6S*VSrkfUG$%`9|+au4R6RleD_;{%-E&WyKL z81#=~ddM#hkD>vQ!fhMwYZEzC{Nz5>YHSb!MU+VIwC;AgbDy%oydztKqMwE)zkQNK z_6uDxNYc?Ou|GZ{y-d=oIM+xg<@vE?m@K{Sx?QxrFROA*i(oaD0#uxIL0MTMnsl zCrVt=NU~}n^O+k@hNMqW!u66e18iu501i3Q({=r(KCtx_J8G|;q?Cpc^NtiR$ow4 
z=|tQaQeYJj+F9f>36c)bZ03T|8I!D^^r()(>V()LwX6+Zag5TmNB5@tQUT`xL=|D*+L~Xa=Af6yJbZR{mtJcYz`e0AxOop_cu<#RL4&= z9Yo7Pu)L6=Cs%ef!<93ASAWDp;lM_hwr*#9DUoMIfhqpaokU$+80CvIWdlEcuB+@- zD5^SjGd0`Q^R_>oR(it>rDi%!hHi}lj2E7WUZH>nE>q#Zu}CM#eOWZU@nIsx0Q&pB zppW4>L~7%o56i!~FqA#`Mu$bgDm}eD7+A(JIlZ9=C$K9HJo`c!GJg52f%P%)g3RQv z7)f;`90asM+#b$x*`R!!-Ct^r{)Cjdl_Kh*$Xv`ZIMWiPfimaCU0Y~wU27HD)6txoN?s>ouRZY0 zo>n_MIa zc7vEI`jKB`K#@&PMe1A_gKesa3>WWPES3m_$ek*_;AfQp-Nu8Gj*ZzXGzUt{(_tu^ zqiR})v?*o45HvWAEM6g+jm~svArU_Xkx9--@l89RsYav9yT0BYh&RG>giqdwH+P(fF>$1Tr>g&9sa+Bem0^J@$i?Lg zijR1M$(hjOK7^;oc6&X-0zj7RMfIxc+;Skg*4^Ws@nCs}R@sxc`E%44y4Ef6g?HYe z8Ho1J%eI%<)VcEtn$cm|B5LV|5tC2%@kgfO`oocNsJC%zv7GU+GK$y@ILXwu(%BZT z)EjPS#Jf?=bekP+%y%kWrmes(+?R*=)(sU#>~xZ8X;j%sOG(UD#rU)T31@pfbF7~} zrnY}8H@-U%M&jSchAQ>9byZr-zlS2US1?U6MN$+UpN?0nwyHh*<#O+q_n3r|TXPUk z&5a!r5Y$dkD?3(s$r~Bn(>#1jw3Qq!Q^KJ_)o@ z*9cug9w8wyVGeQTXg0~xECUHfV-=mj_y_Ri{`8}WcHCu&-2nb~=IBmtj$b>^+vD-$ z;b%E~q(&zhMGzBQcf_s|kG*8b-DFODa*epFklJRla2x%7r65=(x1L^$@+V9OuRy4t zRELbq-CSWeaY6-rsNszx})To?CZ7kIRw8} z%Z9T}HZmgN54Z{-sY103XDbMHAEOI_KKU_ds(z&4hpafh zNsRV@d@i~qB?ODvpX+suzNPj*#d#YL9*2TRU{7&sjZi*ngzh5}u2EDz#g&n|{?@&d z54-46;NI<^Eyyl^y16*JZSQ&dad6q}3qE_kMBNd;`1rnO|8u;M8a`XXVPnDQA|{+d z^$xov-1O9`g$FB(*q`gx+#Va~ z`~^hv8^mO%2C>HxC$qGH*i>LV*n?;hrd|T6K@l$k{l$flnOqilaG?pkt6Zf@QbV zB8PL@M0^L=cZwtjN95ZmAk=sdtY(EH6VncttcHQq`3v-H^=ys)04`lWhXL!!e|@fh z!phy5f#B>Lg@6y2~mIozixIDpjMW_>c}e= z1nQ>|e&n3vF2r29S)4@WRj}aDgrX=D+@o3ngSmkyp=z5pXDHPH0Q8N7S;%+Zd+NNL zkz&*yaC>TU(Mc;w*3@f7MnWZ|GO;mCuylL}(&%JPmhJHrMl4y3&;IK}xq|6nx1N+fA~Nr==|! 
zAFnExlW;3q6)1JISGBPFv%v&NI@UE>uZ-{@<#s8>RxGZ}G|1Z!stZt{P{HqCTVvZ+ zIx6Nt0uPI&E{$oUMN;$*je;Eq(cJccP!$puh7GmxC5-{TmR3LFZ z$Yi*ZsA0#n$qW6lY9#ZJqY#iJW5{O`*EiIjklhmAODSC;VBreo37+bP2!{o1w z>5$Y|hqd`q%15PjB``AUu_{q0+YCpYUS-zy*sHWAhcyKbPqDVC32B+V+UG5MEwsqv z_*00C>63%#F}qj^A%rBDJ;sY!@lD18xynXeZ$~55GHfJ8C->Q@Fi+<1>oKy^5|p+}lM8I0XHsG@0YSC&pxPnfM>$ z89Vs*Lqd3ydd88rSYiL1*SCdib8%rXW6E6);98oRaM6KML z|Bx^Ampg7xZgvnp()xN`r9UtvpGpFv*?N!g+3JEwZhabd@$!a!yqr6BU-^Fnx4kcz=EV4rkCL(Rt$1f=&_Lze~P7!R%X!7;)wt@c%)Jft78e=p#j6*c`g<>%&xk~BoLOjUbJI?bM!gT@O~zzJ zQV1s}Ckq|>ooq5*XBZr`K28dR8u%6R#rQYmr-wGI6Bk(ocNDJLZkX!?{s14#?EN5w zwihy9q@F?w);Hv zfO%>{mYTn{D&mOigd0ANiR_l!ABX=WFolKC-?Ob&wk?KVfNzk@oKssAayvdu?FOxd*msk zI?R58;4C#$7u`%RVr9zufIQB?_QcxTQzryYnX%dS@`vZk}7-B*q+{BaP9i zo+Hb)_F4;v-fvw6BBU+q0x09SUf zf%cC8TPc|pPYJL`MFvOBu}qa&Qa9e9C-f*FdAZ}sam%Af&NKEboC+FAH+~SRzpOOD zRNovJNpT%35*BhVN&19um@DV24W@>vQ2f@93+bj4LD*S1E}>kYW<9Wq3uyTC{PO`o zIF@0I%zbwNGKQNe=lg5Pqst9HCRx9A>adt+XyLjwF70;khPFsle^65Ii`b~le^cZ- zZsN4DKTnI6=BH`_$;5Lg0(?bEW!6Wf!=Gt#4We4^A`HfT&1R9RPCB9Csz~3@9^ zg&(hi!M2IOw~crF*PFy7bul`e(8-w_;x3KsMRx6z-schLP3DUXATMus?*7Nl_9~!; zI#cx*jaa}W$YDa#ttMMd_7^uXbM##}yNgs#^fiI#!Eu*LkIW>Z9$F`BDUI7r?7_bv zSuNUR5w0LEp}f&Mq#p{WpR9RC4&X*Jl?8sJf1`6i!>wUiL#`2VW9dM%cjRjeVUe|B zL;{9bAhcpd?HKKC+FR6fhG^ja^0{zyWRJ+5Jw9@AEu-#U>LI!2m1$vQko zZbF`NR2A<>OvY-V3?5ym^3LQyYJ&Dja&|7c1=sI22RjmEqtZ# z@p-h>FizzGFXn;ikAlAM_`sW%66jf=F4dRBXRk8Qab8vK$18CC5$J3FTHm(!-PE9; z1p-4$%ZUZGPk>%v`YrQy=3h6CFW+4c&tjmVqK!#^waLAfy>)f)O$A>Q;i#)ew_?A$ zo*6EAREHYL$vs6KC0TZ-&Mid{U{I9XM?wU>cddB3VFW~4*5=-HE-pRULZN7am5S3T7H`q#dJYr z08WH4bA={VlnxK=)q>px057!8LOym!a1^o?#lC||m=x`0fIHeHu0b@&2zOC`xyn8X zWX{G+90_7RxubG)S_8xydM7{hi3EnEkFcYNCsk+{`~oRs=`-+29Y0!Q?PHj_HEb#Z zoO2dQafpPS+QpECBf#@8>s-y)kI)~%T84l>x_ZJ{H)gm$e2JnK=B(vhgYTcrBrgbO z>EV5JYF@MhEj@x6JtyK%QL>K=%5Q9mY+EjD1q>NaetsJNg+gkukVo(wZ{;?;FVwwZV37~tavk%lywbEV}-0(0A5~QV;%smt}Y#(0T)tF@4ewoC>gXX^_yBo&rnXJ zU4fY-00g2nuPBxQZJVduRIQwGN+vaCm{Oop&q>_dnTuB#)4D(m{l# 
zILDONA+~cz)JD-hFQ?35%2A-5a0)^ZBkk;U%=t^axeAA>qGpKh7HFzNXAtX?Mj25s z6XdHgnO}LniVu<^kEsazyP&9xtBM#-KVpj*4)3RbaZ1)b2J25L zTT`TxCROS?jpkNe#Y#j71x^5rbuEfA@1<~85Qt4icxWfW*Ww5_)bB6jE2y> zz5d5`P3P}lAHw)%|JvTvucu8Vx7aOU6)rBUFlWi;X$&>0?&)$lfYyQU?G)daT z+;_@nV@bTCaif;Wwq<>_u{l%U8ohen+fJsb%n$gyHPn?n@lafiEGbzoX$kf`bo9Ek zH&z~93^3FqB>s3fZ7+*OKNHS7K93M`c~SDNk7i<Y8^~ogJ1uZJmm{6hwW@wEHdg8+@flWUJ8M+!kuKA;=jz&@5I+7R zZjtIa%HBg^R`_m8wA-4xe$0T6EO^RKkBIN|Q+>)oYpjwz8F4Lm3#ACP<|`<{4A9v@ zdotC@Jr{>1xeHh_(LodB1oQS$~m(wh!flDF4sup0& zAYk>valf~ol0U$`-h7`v*?^S&h(R}%U`c_26Ykm(^1%Q^hN>sby85lkPG`x{Uagu4 zjeI5Qz#-0|eG@}tu7b^tkG-~tV*nM-DPmqxTYJ<&i`MWJF;>*JROP`;wtEt8WKBC& z+dP)yB_#}cLi%_)OLL7#7r)T#MNcy`1x0oX?uoV9rhcR9Y=)s?MJtwL2B`f35Z~*? ziUcn0Y}^7{N-(uvzWx^}rgDI?s~?$_%U|`qy3N3~?d?@y4JD)LmS)~R5Byk*pPs2a zI=XjijLo`A@JF$CRmgiop7x)rrV5r#0aCSKXiYsfVyAU}>yA2KQxmO=l)wF0nYyr z$gg=Y%oDF;0&&{<*x6vd0={^@XTSEwl~}go?zLGVw8S7AeMxxW`aKDj+`v-*%9k^_he;eW)UBRHi?PFDX?rd_rJCqg_Rk*}6aiD99zl zgebSnZ$=n*Uw;2sG!E8lul1o|loDOKiZb|eqyMDQ(rS!W237Lbj&YLs|?mcUP5JKI7j_5+%c=#-aPKv8F?eoQ*;mebTebUR8$(J1S%MQQa|Ov zLXq5M=a~s=KR-;5I;ZnnC9!>_Jel3o?&M1YxU=d@4}-}@e5m@PKN~&8D+EytkHU2^ z+Rc*KW+OW_pXuCpAb={kbeAP7onISAy+=2GEDLt8-=>ZJBJkO;dwme7t4JqBP<0}2 zd!o)2AsO#>B8qz>2Yp8qS@ucJ6|u42+!5T)q<=(0r0p1Dbx2|W>YWzsl@gZdcuGa7?8UBG@eqczuWqK!9U-Z{;-Ez^SUmp=|KWR! 
z=&c6Um(urV=My@`+ISPDd=aKYYo&Ivf-5ebbn@~)=#_qg>F<+BwNm2!b5&97jK9u& za_MKb$eiPfo49>W&aJR_*0?nHnDgB>J3kN6fBx1Hd=2TU9ar3jb5yo;p#L5EtD|<7 zXj;*~;j7Ai4fAzuqwqWS;0wOS=6~dGD)!6{mR`_Unpv;I-Utv8Oz*(2xu@QTEw24a zDAaPNK6KozISr0ku3{Rkx`iOe6#L+m=sf^cavszxvl>w0x3^4X z*M~O%#okfoHMdl7F0vn{-jKEiA!H??-UI?uef4VbDUf|lnSP*cj7B!$?o5w(i-K7f ztE(LlUg77si3Upavw>(i(IOXEL;v;HpxzAFw6(DctfgGa1M=wwL^K|I9f6WDKnuY` zM$r6yZE`Bld;sv@5&LxU9{oe+#eeT@Dd9c_iYk0iqmk^-uvKT5Yd-+=zkntU%x^%) zYoL~*4HqyS4(0YlpC9mHmJ6g?kM;hXQ7JRx_)gRv%e{az9stMx(lj-E6l=_9L%8)$ z2hhf~oyj7jGI_1af*~F||AX2!L>2Sl(t;&?{HZXcC@4Kxl81rfs`Yfa5)oBj@Z%Qi zs`BPSOcj2T1tQuau0!(KFKGOa;Ff;)1)B)VH#!2IJse7t#()MqIceN0JAb|#|i!Z{}CQ*7jH*DG|ZLhRgJIR~e| z`}g+lN5Y$C7%{+yveuiM-)7*uG6=qwzv`f@TMT6Ya6OcNjC%g%sy@EJA^&E!qafUfZ#P!9Mg zzOus`2g(vcE}jED--rGY^Jf1OypV3W2mU<+T8`P#3&Lh}fNGt}8DR|)hr_ey0w)L{ z7bk)Ky?j7J`|I!eC(HD!-g3V>IbeWWh!n(CL1H{yP2uTNW~CQ}fG}bWBsJ_^UJ)^= zIOOoKAf$rs+6lCrLqU52Pqss;MbFMSxK4d8IX=LSgfroIg9P5>A#|hOuK}BhBn=nJpA63ihRYu2 zbwsfdo|`4AO@2SRWA90mCg;nhWG(2p0Zt`!87+sLak42*M|NnhR)qR0a$SJyayE8>;#uKXtoE+k`h*5~hBKe7G4~;8Mhuvb+Z2ido($75vE0%1DQMlskDmaV~A)nB-0x>K|brH6-}R7#D}i z#kuU!o{esU$?F-^)fY)eJ0TYmsV|m5*V=8+lX*%TeJ< zjUp~4Bi7PAK58~D`tDiZ*b3XJ&Ns-Km!KaehF_`%Ue$&4*w6JK+e80OtM~b~7x*J@ z_9OC`h~ELLb{TgtV@*M(5zNg3=%IhCL(#)pEYjr6ZGy&X;Llsd6-dB`ap-s&6(1MI zV!=B+lI+FeA*Q-07isSjdrH&M3PO6;OAco*JC$ai^xm31;6E4nDASJmg#$FQ~7vx z3%m-oC&Ff*^Xse=u}($YEV`y5m>Y6n+(`tQfbSxq%ua@ofV!5pDqDd+UZ^_~uT>rZT{BYtBj zN1zk%mVC3rk1n?ss-3S&*rE1+|9GjSr|*p~aY~zYm|jS>$*+}!D>fbVE{O%yGyylZ zXt#iW+M2!@e-hDj_X5>u&1UUFm>H|50)ENJbq+^K)@yGPOajM4Z?9$)@VTn3xBEYt z6?u2tI`vbB8yU zKK@Zl4UG^1M`dg>OabQT#h;(yc$T0QveK)g^loc-jE5XLXS~%7hZWYl@^1k>H52GB ztLR>1?AgG2Dln_ukCpF&JPFv!`IT=gP49H`5iO$th*SFkOm}{lQ~h4vSYsRq_~O4h z@>~1AOzU9kpO}{#OOzGrws?-}!MS7;TZIPcy|zs?a-edn<*K;SmIp>g$!}TlgihOT zt9al~ZFT9Kuyz{n1E$PwfzvpU=$GLkMN9kN9OKyla|Yn~{PP7^Pt6bh#$ye|1zPpe ze}xljgmcjMw-A}^bX$nHLE!?o9mf<(i$GcfdKCu}^cI{=jmebju?<8V^w2Bna4+GG 
zDC}Fyg-K+_W%5E*W*_hzRh2*nJ~e_X4D1pmD5a%v>PbcffrM8qod4nOJ?l;+WUr*M%g!>ilakO(dw0aXII7XfwwuLbJ?eIB6NN@?&JgJ6n zNz~@^hL&`)PmNDvuC?4%`!&}m zFn~|t-=gsKbas0A$KAv2_V_^Aa^Kx=fLi$$f~$rVsCupuYx?}LvvqNW`2^Ug_sRbJ zCO1Eva^eCsWEC7~;{r)ZbDZJGp`1p!5WqMb$6)-XqdvYwMzQT?m!7i-)8#rV75^AZ z|7%>Z;rE5wrdQ&6)MbPw@FJ4>0p9RIb0`PnU9!~K(?cZk(_xHxA2Z+;4bSs!Sb3-= z*BCmv4lD(97BBbW!d5td3i=NT#$cjiVYq>X6-QUskZ?kC;G7OfctCt&q4~I4(r&1k z4T{_~;XxOY6v;!kT}xh|XHeZ)Ymp*42m!Tl8nZnYe3q1bZEpXzHox7P$9GT)m=VQD z*w``TOZG>&q{Kn3+6ZxGRe>Pm3grs;3u4U-PK8MDpLL}tLTiGWNUhi>I$5e^p;COr zHaP6nK|-QnAjQzw)CTf5nWlZ^jrUc!&2?lu6*r+Q7Zx{~I%wdPM@{vROIhw$3bW32 zp;KO-a_+4hgIv*(Zmr`kcqjBuU+8MAAGl*0;P<)`AvB(r$^w7kP}9MqdBsP$ z?3K{LU1+JQSCn^b^TU;ABsr0tbBAKkvot>Z*OUHY^rqKV0MWHfpR@3er{1JdzKWcW8R59(b`f>*^mgYZe;tgF#zL-HSQ zSR)KbtveCc9rjQXvb-$r9rzOLzc`0g*#@hm%RcFHMOsc91iK8^IJXhL!4**jAP{0G zm7_34a|oE4v@QcLH$za2VJ=#j>Jm;cyvqNH*XA8P08V~0b}!x*g12h2WYHak{C`Rg zwS&M`J~d}9vZ2i~YBB#le=;JO)Np!&`h7<*k1tdksNWrFM94v_it~TeYr3TW;VZWr z(p8tcQTL%wYS;CQ`9YuSG8)@+Mr{Op^sZz#x2>bIlRozFTRGlb0S2$wmS8RV4N$5F za{cV1KHZ7DzeP~USGk+a8bszsyFTpK=8+PD263>rdS7vdOwFhO+85BBzLV(7r+w21 zQ398-9X5uo^@fL6Px+#oFPX5KAOCC8#EAnfmi$HU*b~i1Z@IyFvhkq7ru~Ux3fRjq zLa)0+rPKOw*W{7@!54~NwmcL_qCk(L3+@-k3j7o{_#5;VA zqi5N*VK{cH?6Hz4{V#YB747eh@Ramumk3DCze%f3NV2rORN zIt$ES>?LD5g6EG4miWs*VZeImiFV5~s#J&M$O?TDPAn`ohT$6#guNo8=+;ds3!(I7 zLeO~kBrDzdw*9l9>bD?eI}hIM(OLt8WyIbJB&<}#-X$`8tOM~n@nw{@{2{-4SJU=(Hd9NRCJd01qljtu5P|H}9XY9WMIDE~Z;VT8&B5 zQW>-$#tyPk2-Eu^8Zz?yJn-3b>~yAYMco*wzp%_2e;mbJ@`^IMZrFs^!>XOk3Pli= zPC`!Cr-Nzo>Iwd5NG>TG$OzNPtwE^K+egdbYU3M5bzTOq~g5c&c^6ScT));Q2X?a}<1G?Ilro~lkk>9#&Jy>ycd}nuTPQbHT zpg#v@ZCVp9CKc_T>N4k|YZu&ws(n$z46qs$pJ*r2NZ0b;5dLg12~9~B($`dRN3<$8 zRyF({l@`)*a~3XL-PgfdWt5`!{j?Xwq1n@yqp0lDPl>q?iWufke^j=hmANz0s%wL; z_tmvuAnNA#`h0u3sQuX+ujD(c;3!6?OxAseBt1Gum_GOFx}e=7EzboJUP@5GwjWyA zllWtY<>)n{BEGL$xiJX{_0HwLH>sZMUI?Qm>BT=_bRb6KlWmf6ZP%yf-ImAMnzjv8XHGhnRjmS5=v4= zn+kD6ipY-Cr(M7I4a>C~e??{)E#w|afc`;b8p*m^zgBcu&Oo7vaxfhe$KrzdM@qG_ 
zA(M<(>huqv^YhLMlZP5uPfYZWZ~Jn2`JcBc^I>tt4KDTn~lL$i2ZVIQ3R)_zx zRoWV(70Qun3?Mc!r#t7g4o!9SuiO{=Ia7!_n#~3KgBp1#pD%GyKl^HUv*Led>GuEc zfkraf~p;$%ir_kMo1R*Usd=Qx9S7eXBXY)zIo4|W+Tg@v+I<Q!bMC zTjW=i8T#*l$e>5xCUIT~midrfy=|arWLy_5ObJ z+MYg4UvtKM@0yFN4)JUt)P%fVO-8|oI2_`1%Mpe< zBk4ej7c{!P50Sa#UmTEwHZoC`zTODmYrBx=|8mj(r8n&fRu-SaogZV@+f?YP{Grny zHxLD`#}~@Fu_tSfd#VdEo|z|Rei)6^Qp!b-f?<)wdz$eEXGn54)ONCeks{g2wlBLI z=t_K?l8FT$thZaC-DA1_U3RwqrFF&>UQVr1@|;XLJ$w5+q4#dS;`?KF;P)kZoFHcQo!!Izd%m6k3_J+Pd&(J(PTc<>4s==Qx=^baL`D3hjKp_ z{?+{;eD;ubl!dLe$M)buHu0!FH{!BYqowD9S`?>9<+XzXdg@JyWyhlUPNUT!;0(Y# zADoBGZ3=b0K$KAwqH+9<#6%U$3GSyyB#7y)Sshl>4(4gI48M|bfD9cZ%_dY9p^_8j zIOg1?1!x5!HmyWVgMt`)L*!nbrw~s_Ot1%y`rrysgS)MSkH;sG!h}JqR+ST0jwGx# z`8GKR>;Pt6K39p);*@kt``d5CXtI$%6Kqk-MvwaW15{L?^3{ciaIp_xuygC~{;3R8 zq0gF2>lYAYQ74YM&rzdP*(DQ^eA~N=BF0izqEWl@Vosxw;jJNho@Jj@g@G6Qz@Byb zLEzT*ua>T^NMLK6*)T8^b(*R7$F;ALa3J!L0Jk>IPDx@OgL2Rbj)--EzM(#pD(1Q& z@7>vM&sc5Qf&fAh>t$QxL{CC`*Io_!?cwl+Mzqty1n)oBkcmi zWRF2JQP&Mn;|H*Jbd*9C#-C?t9Iz4kuXlr&*Ejo}_#32XeN_{Q;P*R{|NG>C=rtVs z?IQnH)SHS;{wq;uiLM0hC&s-+$(yQaE_-^H79kJTz bMv44-iF{)TkobFs8>j{%kqq(%3i5vd5|zF@ literal 0 HcmV?d00001 diff --git a/charts/lighthouse/values.yaml b/charts/lighthouse/values.yaml index 54f43949c..e53482b9b 100644 --- a/charts/lighthouse/values.yaml +++ b/charts/lighthouse/values.yaml @@ -1,80 +1,57 @@ git: # git.kind -- Git SCM provider (`github`, `gitlab`, `stash`) kind: github - # git.server -- Git server URL server: "" - # lighthouseJobNamespace -- Namespace where `LighthouseJob`s and `Pod`s are created # @default -- Deployment namespace lighthouseJobNamespace: "" - githubApp: # githubApp.enabled -- Enables GitHub app authentication enabled: false - # githubApp.username -- GitHub app user name - username: "jenkins-x[bot]" - + username: "jenkins-x[bot]" # user -- Git user name (used when GitHub app authentication is not enabled) user: "" - # oauthToken -- Git token (used when GitHub app authentication is not enabled) oauthToken: "" - # oauthSecretName 
-- Existing Git token secret oauthSecretName: "" - # oauthTokenVolumeMount -- Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) oauthTokenVolumeMount: enabled: false - # hmacToken -- Secret used for webhooks hmacToken: "" - # hmacSecretName -- Existing hmac secret to use for webhooks hmacSecretName: "" - # hmacTokenEnabled -- Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud hmacTokenEnabled: true - # hmacTokenVolumeMount -- Mount hmac token as a volume instead of using an environment variable Secret reference hmacTokenVolumeMount: enabled: false - # logFormat -- Log format either json or stackdriver logFormat: "json" - # logService -- The name of the service registered with logging logService: "" - # logStackSkip -- Comma separated stack frames to skip from the log logStackSkip: "" - # scope -- limit permissions to namespace privileges scope: "cluster" - cluster: crds: # cluster.crds.create -- Create custom resource definitions create: true - image: # image.parentRepository -- Docker registry to pull images from parentRepository: ghcr.io/jenkins-x - # image.tag -- Docker images tag # the following tag is latest on the main branch, it's a specific version on a git tag - tag: latest - + tag: 1.17.4 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent - # env -- Environment variables env: JX_DEFAULT_IMAGE: "" - - externalPlugins: - name: cd-indicators requiredResources: @@ -86,392 +63,287 @@ externalPlugins: - kind: Service namespace: jx name: lighthouse-webui-plugin - gcJobs: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # gcJobs.maxAge -- Max age from which `LighthouseJob`s will be deleted maxAge: 168h - # gcJobs.schedule -- Cron expression to periodically delete `LighthouseJob`s schedule: "0/30 * * * *" - # 
gcJobs.failedJobsHistoryLimit -- Drives the failed jobs history limit failedJobsHistoryLimit: 1 - # gcJobs.successfulJobsHistoryLimit -- Drives the successful jobs history limit successfulJobsHistoryLimit: 3 - # gcJobs.concurrencyPolicy -- Drives the job's concurrency policy concurrencyPolicy: Forbid - # gcJobs.backoffLimit -- Drives the job's backoff limit backoffLimit: 6 - image: # gcJobs.image.repository -- Template for computing the gc job docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-gc-jobs" - # gcJobs.image.tag -- Template for computing the gc job docker image tag tag: "{{ .Values.image.tag }}" - # gcJobs.image.pullPolicy -- Template for computing the gc job docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - webhooks: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # webhooks.replicaCount -- Number of replicas replicaCount: 1 - # webhooks.terminationGracePeriodSeconds -- Termination grace period for webhooks pods terminationGracePeriodSeconds: 180 - image: # webhooks.image.repository -- Template for computing the webhooks controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-webhooks" - # webhooks.image.tag -- Template for computing the webhooks controller docker image tag tag: "{{ .Values.image.tag }}" - # webhooks.image.pullPolicy -- Template for computing the webhooks controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - - # webhooks.labels -- allow optional labels to be added to the webhook deployment labels: {} podLabels: {} - # webhooks.podAnnotations -- Annotations applied to the webhooks pods podAnnotations: {} - # webhooks.serviceName -- Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out serviceName: hook - # webhooks.service -- Service settings for the webhooks controller service: type: ClusterIP externalPort: 80 
internalPort: 8080 annotations: {} - resources: # webhooks.resources.limits -- Resource limits applied to the webhooks pods limits: cpu: 100m # may require more memory to perform the initial 'git clone' cmd for big repositories memory: 512Mi - # webhooks.resources.requests -- Resource requests applied to the webhooks pods requests: cpu: 80m memory: 128Mi - # webhooks.probe -- Liveness and readiness probes settings probe: path: / - # webhooks.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods nodeSelector: {} - # webhooks.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods affinity: {} - # webhooks.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods tolerations: [] - ingress: # webhooks.ingress.enabled -- Enable webhooks ingress enabled: false - # webhooks.ingress.annotations -- Webhooks ingress annotations annotations: {} - # webhooks.ingress.ingressClassName -- Webhooks ingress ingressClassName ingressClassName: null - # webhooks.ingress.hosts -- Webhooks ingress host names hosts: [] - tls: # webhooks.ingress.tls.enabled -- Enable webhooks ingress tls enabled: false # webhooks.ingress.tls.secretName -- Specify webhooks ingress tls secretName secretName: "" - # webhooks.customDeploymentTriggerCommand -- deployments can configure the ability to allow custom lighthouse triggers # using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify # 
`customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing customDeploymentTriggerCommand: "" - foghorn: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # foghorn.replicaCount -- Number of replicas replicaCount: 1 - # foghorn.terminationGracePeriodSeconds -- Termination grace period for foghorn pods terminationGracePeriodSeconds: 180 - image: # foghorn.image.repository -- Template for computing the foghorn controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-foghorn" - # foghorn.image.tag -- Template for computing the foghorn controller docker image tag tag: "{{ .Values.image.tag }}" - # foghorn.image.pullPolicy -- Template for computing the foghorn controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - resources: # foghorn.resources.limits -- Resource limits applied to the foghorn pods limits: cpu: 100m memory: 256Mi - # foghorn.resources.requests -- Resource requests applied to the foghorn pods requests: cpu: 80m memory: 128Mi - # foghorn.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods nodeSelector: {} - # foghorn.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods affinity: {} - # foghorn.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods tolerations: [] - - tektoncontroller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # tektoncontroller.dashboardURL -- the dashboard URL (e.g. 
Tekton dashboard) dashboardURL: '' # tektoncontroller.dashboardTemplate -- Go template expression for URLs in the dashboard if not using Tekton dashboard dashboardTemplate: '' - # tektoncontroller.replicaCount -- Number of replicas replicaCount: 1 - # tektoncontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - image: # tektoncontroller.image.repository -- Template for computing the tekton controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-tekton-controller" - # tektoncontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # tektoncontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # tektoncontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # tektoncontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # tektoncontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # tektoncontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # tektoncontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # tektoncontroller.resources.requests -- Resource requests applied to the tekton controller pods requests: cpu: 80m memory: 128Mi - # tektoncontroller.service -- Service settings for the tekton controller service: annotations: {} - jenkinscontroller: # logLevel -- The logging level: trace, debug, 
info, warn, error, fatal logLevel: "info" - # jenkinscontroller.jenkinsURL -- The URL of the Jenkins instance jenkinsURL: - # jenkinscontroller.jenkinsUser -- The username for the Jenkins user jenkinsUser: - # jenkinscontroller.jenkinsToken -- The token for authenticating the Jenkins user jenkinsToken: - # jenkinscontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods terminationGracePeriodSeconds: 180 - image: # jenkinscontroller.image.repository -- Template for computing the Jenkins controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller" - # jenkinscontroller.image.tag -- Template for computing the tekton controller docker image tag tag: "{{ .Values.image.tag }}" - # jenkinscontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # jenkinscontroller.podAnnotations -- Annotations applied to the tekton controller pods podAnnotations: {} - # jenkinscontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods nodeSelector: {} - # jenkinscontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods affinity: {} - # jenkinscontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods tolerations: [] - resources: # jenkinscontroller.resources.limits -- Resource limits applied to the tekton controller pods limits: cpu: 100m memory: 256Mi - # jenkinscontroller.resources.requests -- Resource requests applied to the tekton controller pods requests: cpu: 80m memory: 128Mi - # jenkinscontroller.service -- Service settings for the tekton controller service: annotations: {} - 
keeper: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # keeper.statusContextLabel -- Label used to report status to git provider statusContextLabel: "Lighthouse Merge Status" - # keeper.replicaCount -- Number of replicas replicaCount: 1 - # keeper.terminationGracePeriodSeconds -- Termination grace period for keeper pods terminationGracePeriodSeconds: 30 - image: # keeper.image.repository -- Template for computing the keeper controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-keeper" - # keeper.image.tag -- Template for computing the keeper controller docker image tag tag: "{{ .Values.image.tag }}" - # keeper.image.pullPolicy -- Template for computing the keeper controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # keeper.podAnnotations -- Annotations applied to the keeper pods podAnnotations: {} - # keeper.env -- Lets you define keeper specific environment variables env: {} - # keeper.service -- Service settings for the keeper controller service: type: ClusterIP externalPort: 80 internalPort: 8888 - resources: # keeper.resources.limits -- Resource limits applied to the keeper pods limits: cpu: 400m memory: 512Mi - # keeper.resources.requests -- Resource requests applied to the keeper pods requests: cpu: 100m memory: 128Mi - # keeper.probe -- Liveness and readiness probes settings probe: path: / - # keeper.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # keeper.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # keeper.datadog.enabled -- Enables datadog enabled: "true" - # keeper.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods nodeSelector: {} - # keeper.affinity -- [Affinity 
rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods affinity: {} - # keeper.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods tolerations: [] - poller: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # poller.enabled -- Whether to enable or disable the poller component enabled: false - # poller.replicaCount -- Number of replicas replicaCount: 1 - # poller.terminationGracePeriodSeconds -- Termination grace period for poller pods terminationGracePeriodSeconds: 30 - image: # poller.image.repository -- Template for computing the poller controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-poller" - # poller.image.tag -- Template for computing the poller controller docker image tag tag: "{{ .Values.image.tag }}" - # poller.image.pullPolicy -- Template for computing the poller controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - # poller.podAnnotations -- Annotations applied to the poller pods podAnnotations: {} - # poller.env -- Lets you define poller specific environment variables env: # poller.env.POLL_PERIOD the default time period between polling releases and pull requests POLL_PERIOD: 20s - # poller.env.POLL_RELEASE_PERIOD the time period between polling releases # POLL_RELEASE_PERIOD: 20s @@ -480,77 +352,58 @@ poller: # poller.env.POLL_HOOK_ENDPOINT the hook service endpoint to post webhooks to POLL_HOOK_ENDPOINT: http://hook/hook/poll - # poller.contextMatchPattern -- Regex pattern to use to match commit status context contextMatchPattern: "" - # poller.requireReleaseSuccess -- Keep polling releases until the most recent commit status is successful requireReleaseSuccess: false - resources: # poller.resources.limits -- Resource limits applied to the poller pods limits: cpu: 400m memory: 
512Mi - # poller.resources.requests -- Resource requests applied to the poller pods requests: cpu: 100m memory: 128Mi - # poller.probe -- Liveness and readiness probes settings probe: path: / - # keeper.internalPort -- The internal port used to view metrics etc internalPort: 8888 - # poller.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 120 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # poller.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - datadog: # poller.datadog.enabled -- Enables datadog enabled: "true" - # poller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods nodeSelector: {} - # poller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods affinity: {} - # poller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods tolerations: [] - engines: # engines.jx -- Enables the jx engine jx: true - # engines.tekton -- Enables the tekton engine tekton: false - # engines.jenkins -- Enables the Jenkins engine jenkins: false - configMaps: # configMaps.create -- Enables creation of `config.yaml` and `plugins.yaml` config maps create: false - # configMaps.config -- Raw `config.yaml` content config: null - # configMaps.plugins -- Raw `plugins.yaml` content plugins: null - # configMaps.configUpdater -- Settings used to configure the `config-updater` plugin configUpdater: orgAndRepo: ""