From 36c59da5bdb432ac31fffada3427f472feba29ce Mon Sep 17 00:00:00 2001 From: jenkins-x-bot Date: Mon, 24 Jul 2023 10:23:45 +0000 Subject: [PATCH] chore: release 1.13.2 --- changelog.md | 10 + charts/lighthouse/Chart.yaml | 4 +- charts/lighthouse/README.md | 308 ++++++++++++------------ charts/lighthouse/lighthouse-1.13.2.tgz | Bin 0 -> 15767 bytes charts/lighthouse/values.yaml | 151 +----------- 5 files changed, 170 insertions(+), 303 deletions(-) create mode 100644 changelog.md create mode 100644 charts/lighthouse/lighthouse-1.13.2.tgz diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..9e5ea415e --- /dev/null +++ b/changelog.md @@ -0,0 +1,10 @@ + +## Changes in version 1.13.2 + +### Bug Fixes + +* allow params from previous tasks results (JordanGoasdoue) + +### Chores + +* add pipeline-params-from-tasks-results test (JordanGoasdoue) diff --git a/charts/lighthouse/Chart.yaml b/charts/lighthouse/Chart.yaml index edc18364e..b9190af85 100644 --- a/charts/lighthouse/Chart.yaml +++ b/charts/lighthouse/Chart.yaml @@ -3,6 +3,6 @@ description: | This chart bootstraps installation of [Lighthouse](https://github.com/jenkins-x/lighthouse). icon: https://raw.githubusercontent.com/jenkins-x/jenkins-x-website/master/images/logo/jenkinsx-icon-color.svg name: lighthouse -version: 0.1.0-SNAPSHOT +version: 1.13.2 home: https://github.com/jenkins-x/lighthouse - +appVersion: 1.13.2 diff --git a/charts/lighthouse/README.md b/charts/lighthouse/README.md index ed4700eef..48370b1c7 100644 --- a/charts/lighthouse/README.md +++ b/charts/lighthouse/README.md @@ -42,157 +42,161 @@ helm uninstall my-lighthouse --namespace lighthouse ## Values -| Key | Type | Description | Default | -| --------------------------------------------------- | ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------- | -| `cluster.crds.create` | bool | Create custom resource definitions | `true` | -| `configMaps.config` | string | Raw `config.yaml` content | `nil` | -| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | -| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | -| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | -| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | -| `engines.jx` | bool | Enables the jx engine | `true` | -| `engines.tekton` | bool | Enables the tekton engine | `false` | -| `env` | object | Environment variables | `{"JX_DEFAULT_IMAGE":""}` | -| `externalPlugins[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | -| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | -| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | -| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | -| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | -| `foghorn.affinity` | object | [Affinity 
rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | -| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | -| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | -| `foghorn.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | -| `foghorn.replicaCount` | int | Number of replicas | `1` | -| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | -| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | -| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | -| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | -| `gcJobs.backoffLimit` | int | Set the backoff limit for failed cronJobs | `6` | -| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | -| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | -| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | -| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | -| `gcJobs.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | -| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | -| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | -| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | -| `git.server` | string | Git server URL | `""` | -| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | -| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | -| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | -| `hmacToken` | string | Secret used for webhooks | `""` | -| `hmacTokenEnabled` | bool | Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | -| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | -| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | -| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | -| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | -| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | -| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | -| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | -| `jenkinscontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | -| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | -| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | -| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | -| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | -| `keeper.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.logLevel` | string | The logging level: trace, 
debug, info, warn, panic, fatal | `"info"` | -| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | -| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | -| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `keeper.replicaCount` | int | Number of replicas | `1` | -| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | -| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | -| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | -| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | -| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | -| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | -| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | -| `logFormat` | string | Log format either json or stackdriver | `"json"` | -| `logService` | string | The name of the service registered with logging | `""` | -| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | -| `oauthSecretName` | string | Existing Git token secret | `""` | -| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | -| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | -| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | -| `poller.datadog.enabled` | string | Enables datadog | `"true"` | -| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | -| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | -| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | -| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | -| `poller.internalPort` | int | | `8888` | -| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `poller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | -| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | -| `poller.probe` | object | Liveness and 
readiness probes settings | `{"path":"/"}` | -| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `poller.replicaCount` | int | Number of replicas | `1` | -| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | -| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | -| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | -| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | -| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | -| `scope` | string | set scope to either `cluster` or `namespace` for permissions | `cluster` | -| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | -| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | -| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | -| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | -| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | -| `tektoncontroller.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | -| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | -| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | -| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | -| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | -| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | -| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | -| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | -| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | -| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods | `{}` | -| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for 
example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | -| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | -| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | -| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | -| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | -| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | -| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | -| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | -| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | -| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.logLevel` | string | The logging level: trace, debug, info, warn, panic, fatal | `"info"` | -| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | -| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | -| `webhooks.podLabels` | object | | `{}` | -| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | -| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | -| `webhooks.replicaCount` | int | Number of replicas | `1` | -| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | -| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | -| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | -| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | -| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | -| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | +| Key | Type | Description | Default | +|-----|------|-------------|---------| +| `cluster.crds.create` | bool | Create custom resource definitions | `true` | +| `configMaps.config` | string | Raw `config.yaml` content | `nil` | +| `configMaps.configUpdater` | object | Settings used to configure the `config-updater` plugin | `{"orgAndRepo":"","path":""}` | +| `configMaps.create` | bool | Enables creation of `config.yaml` and `plugins.yaml` config maps | `false` | +| `configMaps.plugins` | string | Raw `plugins.yaml` content | `nil` | +| `engines.jenkins` | bool | Enables the Jenkins engine | `false` | +| `engines.jx` | bool | Enables the jx engine | `true` | +| `engines.tekton` | bool | Enables the tekton engine | `false` | +| `env` | object | 
Environment variables | `{"JX_DEFAULT_IMAGE":""}` | +| `externalPlugins[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[0].requiredResources[0].name` | string | | `"cd-indicators"` | +| `externalPlugins[0].requiredResources[0].namespace` | string | | `"jx"` | +| `externalPlugins[1].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].kind` | string | | `"Service"` | +| `externalPlugins[1].requiredResources[0].name` | string | | `"lighthouse-webui-plugin"` | +| `externalPlugins[1].requiredResources[0].namespace` | string | | `"jx"` | +| `foghorn.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods | `{}` | +| `foghorn.image.pullPolicy` | string | Template for computing the foghorn controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `foghorn.image.repository` | string | Template for computing the foghorn controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-foghorn"` | +| `foghorn.image.tag` | string | Template for computing the foghorn controller docker image tag | `"{{ .Values.image.tag }}"` | +| `foghorn.logLevel` | string | | `"info"` | +| `foghorn.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods | `{}` | +| `foghorn.replicaCount` | int | Number of replicas | `1` | +| `foghorn.resources.limits` | object | Resource limits applied to the foghorn pods | `{"cpu":"100m","memory":"256Mi"}` | +| `foghorn.resources.requests` | object | Resource requests applied to the foghorn pods | `{"cpu":"80m","memory":"128Mi"}` | +| `foghorn.terminationGracePeriodSeconds` | int | Termination grace period for foghorn pods | `180` | +| `foghorn.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the foghorn pods | `[]` | +| `gcJobs.backoffLimit` | int | Drives the job's backoff limit | `6` | +| `gcJobs.concurrencyPolicy` | string | Drives the job's concurrency policy | `"Forbid"` | +| `gcJobs.failedJobsHistoryLimit` | int | Drives the failed jobs history limit | `1` | +| `gcJobs.image.pullPolicy` | string | Template for computing the gc job docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `gcJobs.image.repository` | string | Template for computing the gc job docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-gc-jobs"` | +| `gcJobs.image.tag` | string | Template for computing the gc job docker image tag | `"{{ .Values.image.tag }}"` | +| `gcJobs.logLevel` | string | | `"info"` | +| `gcJobs.maxAge` | string | Max age from which `LighthouseJob`s will be deleted | `"168h"` | +| `gcJobs.schedule` | string | Cron expression to periodically delete `LighthouseJob`s | `"0/30 * * * *"` | +| `gcJobs.successfulJobsHistoryLimit` | int | Drives the successful jobs history limit | `3` | +| `git.kind` | string | Git SCM provider (`github`, `gitlab`, `stash`) | `"github"` | +| `git.server` | string | Git server URL | `""` | +| `githubApp.enabled` | bool | Enables GitHub app authentication | `false` | +| `githubApp.username` | string | GitHub app user name | `"jenkins-x[bot]"` | +| `hmacSecretName` | string | Existing hmac secret to use for webhooks | `""` | +| `hmacToken` | string | Secret used for webhooks | 
`""` | +| `hmacTokenEnabled` | bool | Enables the use of a hmac token. This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud | `true` | +| `hmacTokenVolumeMount` | object | Mount hmac token as a volume instead of using an environment variable Secret reference | `{"enabled":false}` | +| `image.parentRepository` | string | Docker registry to pull images from | `"ghcr.io/jenkins-x"` | +| `image.pullPolicy` | string | Image pull policy | `"IfNotPresent"` | +| `image.tag` | string | Docker images tag the following tag is latest on the main branch, it's a specific version on a git tag | `"latest"` | +| `jenkinscontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `jenkinscontroller.image.repository` | string | Template for computing the Jenkins controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"` | +| `jenkinscontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `jenkinscontroller.jenkinsToken` | string | The token for authenticating the Jenkins user | `nil` | +| `jenkinscontroller.jenkinsURL` | string | The URL of the Jenkins instance | `nil` | +| `jenkinscontroller.jenkinsUser` | string | The username for the Jenkins user | `nil` | +| `jenkinscontroller.logLevel` | string | | `"info"` | +| `jenkinscontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `jenkinscontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `jenkinscontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `jenkinscontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `jenkinscontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `jenkinscontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `jenkinscontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `keeper.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods | `{}` | +| `keeper.datadog.enabled` | string | Enables datadog | `"true"` | +| `keeper.env` | object | Lets you define keeper specific environment variables | `{}` | +| `keeper.image.pullPolicy` | string | Template for computing the keeper controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `keeper.image.repository` | string | Template for computing the keeper controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-keeper"` | +| `keeper.image.tag` | string | Template for computing the keeper controller docker image tag | `"{{ .Values.image.tag }}"` | +| `keeper.livenessProbe` | object | Liveness probe 
configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.logLevel` | string | | `"info"` | +| `keeper.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods | `{}` | +| `keeper.podAnnotations` | object | Annotations applied to the keeper pods | `{}` | +| `keeper.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `keeper.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `keeper.replicaCount` | int | Number of replicas | `1` | +| `keeper.resources.limits` | object | Resource limits applied to the keeper pods | `{"cpu":"400m","memory":"512Mi"}` | +| `keeper.resources.requests` | object | Resource requests applied to the keeper pods | `{"cpu":"100m","memory":"128Mi"}` | +| `keeper.service` | object | Service settings for the keeper controller | `{"externalPort":80,"internalPort":8888,"type":"ClusterIP"}` | +| `keeper.statusContextLabel` | string | Label used to report status to git provider | `"Lighthouse Merge Status"` | +| `keeper.terminationGracePeriodSeconds` | int | Termination grace period for keeper pods | `30` | +| `keeper.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods | `[]` | +| `lighthouseJobNamespace` | string | Namespace where `LighthouseJob`s and `Pod`s are created | Deployment namespace | +| `logFormat` | string | Log format either json or stackdriver | `"json"` | +| `logService` | string | The name of the service registered with logging | `""` | +| `logStackSkip` | string | Comma separated stack frames to skip from the log | `""` | +| `oauthSecretName` | string | Existing Git token secret | `""` | +| `oauthToken` | string | Git token (used when GitHub app authentication is not enabled) | `""` | +| `oauthTokenVolumeMount` | object | Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) | `{"enabled":false}` | +| `poller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods | `{}` | +| `poller.contextMatchPattern` | string | Regex pattern to use to match commit status context | `""` | +| `poller.datadog.enabled` | string | Enables datadog | `"true"` | +| `poller.enabled` | bool | Whether to enable or disable the poller component | `false` | +| `poller.env` | object | Lets you define poller specific environment variables | `{"POLL_HOOK_ENDPOINT":"http://hook/hook/poll","POLL_PERIOD":"20s"}` | +| `poller.image.pullPolicy` | string | Template for computing the poller controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `poller.image.repository` | string | Template for computing the poller controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-poller"` | +| `poller.image.tag` | string | Template for computing the poller controller docker image tag | `"{{ .Values.image.tag }}"` | +| `poller.internalPort` | int | | `8888` | +| `poller.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":120,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.logLevel` | string | | `"info"` | +| `poller.nodeSelector` | object | [Node 
selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods | `{}` | +| `poller.podAnnotations` | object | Annotations applied to the poller pods | `{}` | +| `poller.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `poller.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `poller.replicaCount` | int | Number of replicas | `1` | +| `poller.requireReleaseSuccess` | bool | Keep polling releases until the most recent commit status is successful | `false` | +| `poller.resources.limits` | object | Resource limits applied to the poller pods | `{"cpu":"400m","memory":"512Mi"}` | +| `poller.resources.requests` | object | Resource requests applied to the poller pods | `{"cpu":"100m","memory":"128Mi"}` | +| `poller.terminationGracePeriodSeconds` | int | Termination grace period for poller pods | `30` | +| `poller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods | `[]` | +| `scope` | string | limit permissions to namespace privileges | `"cluster"` | +| `tektoncontroller.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods | `{}` | +| `tektoncontroller.dashboardTemplate` | string | Go template expression for URLs in the dashboard if not using Tekton dashboard | `""` | +| `tektoncontroller.dashboardURL` | string | the dashboard URL (e.g. Tekton dashboard) | `""` | +| `tektoncontroller.image.pullPolicy` | string | Template for computing the tekton controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `tektoncontroller.image.repository` | string | Template for computing the tekton controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"` | +| `tektoncontroller.image.tag` | string | Template for computing the tekton controller docker image tag | `"{{ .Values.image.tag }}"` | +| `tektoncontroller.logLevel` | string | | `"info"` | +| `tektoncontroller.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods | `{}` | +| `tektoncontroller.podAnnotations` | object | Annotations applied to the tekton controller pods | `{}` | +| `tektoncontroller.replicaCount` | int | Number of replicas | `1` | +| `tektoncontroller.resources.limits` | object | Resource limits applied to the tekton controller pods | `{"cpu":"100m","memory":"256Mi"}` | +| `tektoncontroller.resources.requests` | object | Resource requests applied to the tekton controller pods | `{"cpu":"80m","memory":"128Mi"}` | +| `tektoncontroller.service` | object | Service settings for the tekton controller | `{"annotations":{}}` | +| `tektoncontroller.terminationGracePeriodSeconds` | int | Termination grace period for tekton controller pods | `180` | +| `tektoncontroller.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods | `[]` | +| `user` | string | Git user name (used when GitHub app authentication is not enabled) | `""` | +| `webhooks.affinity` | object | [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods 
| `{}` | +| `webhooks.customDeploymentTriggerCommand` | string | deployments can configure the ability to allow custom lighthouse triggers using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing | `""` | +| `webhooks.image.pullPolicy` | string | Template for computing the webhooks controller docker image pull policy | `"{{ .Values.image.pullPolicy }}"` | +| `webhooks.image.repository` | string | Template for computing the webhooks controller docker image repository | `"{{ .Values.image.parentRepository }}/lighthouse-webhooks"` | +| `webhooks.image.tag` | string | Template for computing the webhooks controller docker image tag | `"{{ .Values.image.tag }}"` | +| `webhooks.ingress.annotations` | object | Webhooks ingress annotations | `{}` | +| `webhooks.ingress.enabled` | bool | Enable webhooks ingress | `false` | +| `webhooks.ingress.hosts` | list | Webhooks ingress host names | `[]` | +| `webhooks.ingress.ingressClassName` | string | Webhooks ingress ingressClassName | `nil` | +| `webhooks.ingress.tls.enabled` | bool | Enable webhooks ingress tls | `false` | +| `webhooks.ingress.tls.secretName` | string | Specify webhooks ingress tls secretName | `""` | +| `webhooks.labels` | object | allow optional labels to be added to the webhook deployment | `{}` | +| `webhooks.livenessProbe` | object | Liveness probe configuration | `{"initialDelaySeconds":60,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.logLevel` | string | | `"info"` | +| `webhooks.nodeSelector` | object | [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods | `{}` | +| `webhooks.podAnnotations` | object | Annotations applied to the webhooks pods | `{}` | +| `webhooks.podLabels` | object | | `{}` | +| `webhooks.probe` | object | Liveness and readiness probes settings | `{"path":"/"}` | +| `webhooks.readinessProbe` | object | Readiness probe configuration | `{"periodSeconds":10,"successThreshold":1,"timeoutSeconds":1}` | +| `webhooks.replicaCount` | int | Number of replicas | `1` | +| `webhooks.resources.limits` | object | Resource limits applied to the webhooks pods | `{"cpu":"100m","memory":"512Mi"}` | +| `webhooks.resources.requests` | object | Resource requests applied to the webhooks pods | `{"cpu":"80m","memory":"128Mi"}` | +| `webhooks.service` | object | Service settings for the webhooks controller | `{"annotations":{},"externalPort":80,"internalPort":8080,"type":"ClusterIP"}` | +| `webhooks.serviceName` | string | Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out | `"hook"` | +| `webhooks.terminationGracePeriodSeconds` | int | Termination grace period for webhooks pods | `180` | +| `webhooks.tolerations` | list | [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods | `[]` | You can look directly at the [values.yaml](./values.yaml) file to look at the options and their default values. 
diff --git a/charts/lighthouse/lighthouse-1.13.2.tgz b/charts/lighthouse/lighthouse-1.13.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..81374d427d21e459a8d6985a9977e21362ae04fe GIT binary patch literal 15767 zcmYkDV{j#Hw6=F_+qUhAZ9AEXZF9%AZEIppY&)4alZkC7`bnuy3te zJAZug^gIOh7&W{EIyUq@w|`w-?bK}d>|E_U?fQJ?<@IO>LiAuQ{ZM54oA_1N&RK7A z*qZ`L@j+0GnBoK||HO_herVi1y>@HUCpKXoLCd(#H;!e4sK=%IE(2?sI~WN@$c`KW zh*ki`_8#`D^O?;V(Oqz@ZQz?+I-D;dwdK=D+aDh2rgrw0y#4p*b%N5dZj0&uXi2G5W z409Oor6R1{=VI1?6z@I|J_NTB<~yMI7<9FBrPRj@dT#gRz7Hgt81tJ4-^06MyWZNH zhlq#RF+rY15e|+_rh7t#VyRyl?%Dz0*9IK2jFJ%;_t}Aw!XpDGQ^ZUomCzLTPAjIG z|6l-SnVk`FKnZMr zW`gQ?BSYtR1VElYK#KKyB|!cst!SJ>K4hd9QC5@10}DM)s0E|0lpC@Qgv;1V(j$(@0MFfaqmnx9LOM+24C1h_@ujTJfW znXrr3Bz8*dIZ{T_oKQrGuTPvwLqo9x1~iV6YA}NHbuvw;69-TWoucXB4l6UoqM>F) ziXbrKP$i4Hzx(Q06Q;rpbFk)%@YvhO8JG~F!8@@293Kf$A(4RDU3y4gD*(vPzL$G#)JtHEjs)+-oNy%e>EL0++-cOT2 zNKwm715@i*_mXb-U>M`c#rRMS$UVl2ufsD@OrN2h_n;}C^d#|NfJ?kU`G#HQNBCA(>o#Z;70Aq*zq=;`5Ts0jgG7`}gdeRM z2cw{WDZ&*|I3Q4EPdWiF7NJJvR5?nPNygcWIx&=>$%N~a36{}ij-<`s2fRkMnk9i* zb&E2dIJ7^YETl3&X1*BT$bWWG?6_lkW|-9&J52Lt0itoEs$zfDqB~6xO7Ri}KLWrAyho~F%Xk2*j^Q=sgXtvY} zINKs8`s}B*K;s_1x5K@=m;JNPv5mP`a$05S87TBLQ@#=Sd;OPUFVleMlT(>!n_$nPvWcTg+y|qCTT7BWp`gPnR*Bl z=@(meF=5Sl4*5D(ZkzM^#( zDzTi^-wk0r8=G*09O;ne`s5M2?W}9nH6luy*%|>(SxN=acm+GT?jz1|$`X(=B0S-f z76V?9M;~mZt3#+NNch4G^n_~-Qilp5nFBU@u9Q$dO1}*@?Tz&IoK-VsaVkN zrC6m~QL7a85b)Spwbn7`tiis+umU~}{7u#*W8?MUS^Zep+ox0xvPx^q;*abk%3)IZ* zC<12`q-8RCm^`&Aqaj9v&mISs0a-jnmXqL16H#qfNW+Kun)M$J1;4HGeU| z1~)mEZ(a)j2sKv;;}Qih;N4;*!?M`5c*b>8u&=d6E?f>ZMaJ27j{%OJ4HEofa7d!| z4O2Nh68=>@P&HOyDs{R86C7t>gqG^CJ-A$bf^#-WUM!)CT7d&lASWtnF(F$|N}eym zg|Pv6Fn*4v;HNU1Wy>@1kR7Lk1D7Ci0Md+acod(Hc`>!RBE5jWjf(fCN0*HT#y0_w zRY}KCFc&dM)kNVS3Wc4>(|J$4$1(bl&VU(b5W_ba>o9wBj$$8VJB_b)dohuosNtbg zMKrhB39QlJ27{^b8-9P-otVaGHaj6yDWJzORik@KoiTnGM-uA;V?J>#M9w6W^MQmY z&cIipLRfb6CML4UQLs`q=|qR$TL95&k;F_}|5)np07ylxw`FF{PqWAv2TOEM$z4m} zirK-l*om+P`{%p*D{~q$+#v*M1q*<8x^KDRM-@^PT_+Z9#@AvJL%~I(#4N;{reXtF zp7H>6f80LU$+Hi}=Le#~oq*+ckSB<+QS5RtqxDhV<;doRzwW!3MBsZX5J8KbF;S#!8*c8D4^zWYda*5}XnSth$SjkyBk_^Ac#5hD?=z=RM~!yMrH?%@E&< zFwo}EXuIZycOfsPy2vclG%b9cD4)+=G++G^1?PE1?7|>vbO>>MkP-Pjl6hJWXz->= zUC~Li>!1o)nvO?gk5MBGlCy%YSJAB#*#QNCICsh?F@}1&;A+=XJ{O4Pv&A~Ds)6=U z(Dya_eej72<%R>pb>AOB5nyJFx;XMbW3o7L#`_3?tjNLif~LjmmT(;}#U5WxwbPXZ zxB%i(g(dJ*7)5+tzyd3yq9I)7p+?`a8it!A6mnB7KIPPTfGck*Hv^07# zcg9pWCBzO^g)GA4zvwm#As9@_){h3%N8t4$>`^+_Mz6R(;K%J@_fbJ;6npVxFldq# ze(hA+GHx(5s=(4x)bd3YmtNQH!^qon$QEWe@L|cesMXYM`PelqO~EvC=J~MJiH#Jd<}?_JS2ES}6VCwX zI0#o1G4|%kk7v5_Wb7G^St$Lr(Wk55om@%cUsGaE_;o8)A0JNj>`c`tM3Cnye;J0R zNz+2Ze)+T$K(CY0I7g+OL7S;xs|4$XFJ@5mO$(2?=w%|x32I*+-C%N*SQ*H0*B|^b zx`0IUgZqBc1%bc9tPE7nOIr*;Iiz(Fq~2J(nBp7sM&*G2n}ge zlSc-eQ>B8xv#V72@T#9K_?KBqxQ6FiqqR^My!h&h%&qIJqIx@9($XmE#q4zlpE%R& zCT(!t7A$6K9f>5gcW5>@Bxe^>Lg{(kZS8-j`jUhk!h@WlBAX?(anNm) zP{%m*ivkwg^j4-VgfrTviOKN_yv1RQL5kg~69|7+f1}^LSJAUEdxhabZGAilV|P?f z&y+Q#0*=5y(8}W%q1)I@()82UaDXjw;w&Qs6NRzg)%4$*l}58$yGCQl9H7Jz_*(rJt@c6VVT>-OP&QDN`b?BE zmLRmaH4+P6HNtt^)I&xr{;H$pE-_;IE~2ILSb~`=6(UXZ7i-;$WSAT)bYUKzU~od@ z8*J{B0p9@v1CHD4A$A~C>7VF6b-f!dB-i>o{8N5x@33lniuM4mh9cMc6~TzETXZAw zfkpX_a+`X00bw(GY+EE9{csYB*?ys@G`s+KaxTqwJ{{Imel{jCyFn+px;A>-lC=io zO|1ks>bV}XgU!V*rHk}6xTU*_P~ZBIqR8DYaviN|J6Rd2`RdpJ&R-GiucwX;vqv=c zZxugo|B50D?c+d~1=zZ(EEn8C6WJ@7rkWxvi%-rbXw+EMo%*}nx#d43qvqBAO`ze! 
z2?Yjs5Z1|0l%Qr&JQ|x)8t(HhKwzT63r-&>i+0kJ<0zuj*BHjw+P56ADYd|J$#W(+ zlrdUMBAlu(K6!h|GE7#f6u&-DbGgk@{kuvhZ$qo2w-}8*pD3ZYOnzG1D5r-nK_Sbq z8$g@up`;w2(4dqsyHoIMLRTy5kOps4eAp2U-kfbA6B7!Sy%Z9#ZWpCO_J@EJ=6m-T zePTWl5eZQ)N!EA{>B>AKDP~hOz0u?c==tvCqnK{eWrfoS;b+$PZeFfm2mjl{(Zj)K z1wxco7dd4xGkj0vo(jLcbm;ALZbC|}q^pR=R*Gml!(Ejycom<4L96N~Y^T7tFaw!R zIhWgoyu`5sgiGpKxXegDlTihyE|^u)Q~^CS9UJk?J0olO!1A|KgD5WWzc8W2cfX^= z;~@_B>8llz7@+Nlg)PDx43Crg zc-rNoA`^c>s0Aihs+Z$#2gB`Sb|aErwb4K6(N*mZg_B;c&~%bTqMv7n@jx!9t9461 zc0HX$(f=-4*Lyr!&^%pG_~e84XVY1U7ZG;r-NG^r@#|b7(FgoH7hzA6>#-QnmhYBJ zt~ArlR?t5F_yH#35}rw>&4P|j z9L3D^W7%6gK(P{|27QebDKjyoR0p}}OY|fPDI^^_nM*tdT6)8$AQoNSj|}2~4c9lB z$v&`vmp)kq(PEx^qn^pP%${3Ppb_z5B$yQL1h?)x>PM}}U1Z`Fs@kWdDsuNw{kzbp zi#{dZ?JoL~{OYHhi?iF#A5T9nUYmX4C(q~TTass=ci)^B$8(v{(-mBHR?Kb^q8T*r z@C%|XPrW(>@bbw0g&qU0eq5y_)MYy99qdA9UX@LfMx#zaKzs_c(yOjrLF@9)#9){I zJG9?07AFnB9#@jw(gt!%iRtej$W~FB6+kVjWHFdOFJe|odC=cEVIrV(l~U19qk#7| z8}do594Hf&6Sx*EH>&fSGq~nVzu$BW$>pm%&l|g!n>=iS{)f+HW<*2E&WmxIFE+*ap6O*8ZQQAmUb0Xq8a!e~E(C+%qI`bV!+xHU*WGwx5KX;}4 zJ(>oh9~FjzkTig#sDP$ecZ&@lSkUEy>=tN4v@I!iw;x98#*XV$QFhN4rPeh5Ncj1B z-E^^ctTC5qVOPN+p#2~bHXuS58o8gCAo5N!{Te}Ay00Z4p#$|%^E*70`S)&;@a=&h zaoi1&l>i=|iIhNe)xX%MKo?djxq7d?1YBq@$|nWZXtOebx=|5jbJOg&q!x=3 zDNej+lbOtgEPd6Qcf%a}QaAh&;$L$dCKRqSRn%Gqex<|sBdaoYMugSn+|Htk6I=-O zNk&9`HSt@Nz1nj1st$3>T9GmKd6f)imAB8Os-~?Jwqu+xQcD*uS`v zHNTe5@I0Hczu(+X1-^gw%zOnh$xe4%Dc7a2cC1Y*(8XDW=@SrA`$&kWjr3!G7xM}t z+#)9$%u+z>Da)h)n(uA!-qAfIqM9s5#YwBVW;pE1(XZo#2IG+Z$>O}rH>4L~sT(<% z%P_Ij>KGLZb5gW+R78y+K5&|sGaZ?9)CKUGmk>&ZWciAE-%w{}bHlRYZnZlqJxMJk z(_=hjRWPTev`S!0>N)%Bt5M2>BPyN%{Os{8;10o!!H|V8U4G^`m_dZsIzgtKBYbds=+OB1_vq4dJ zE{tZI-7gZ9RN7!K9`|dN38U@&6bqlyoqKHS)Qr*OhOY4Xk5wX#xhy+e2E;fdU-6y+CUo z4Sh?S(;#<7P4-tSww5++ zx=Ys@cF;HLA&JB>W_~Bb1)TzXj{x#*amF?uIL3G_h88MnmO^4YwT7+z5oKkI zUCdg$?mSbfWd*S@`&8KR4vc7m{DlkOIXLM9IJ`XS&qj${jGIjX0xOq-dGy&4pQkyhIdzg2mX-zldGIEqY0n;P>SEP8me`y%PVuNgiP#Yv%-2EOl z7o3Sc%-Cc;DRNN#A2%1)U*A)d)A}#<2?fSuLWWSZk4aiMd==G8pFi;I{1De++zQMf z>@k%#Cvb~cAJATW-9oO~`*%y|5c~Hm#k)CL6AFbv8s^b|*-VE5heIil8r3#aFgXOh zCdCx!R2-?t9@-UYmtuW?CY$v9!B1zFwi#?RxTFSusB(#~((7}9q7B}W#oe-{vY50> z*H5^Z?n+;T9jnCwRkq;>(^Yxi7PHwxvtS#PPJ77iC_`*E^3R=*wI1mp^ODiOzy1%o0{M zJLM3o`ZU^t6bw?+L+!da&EqUPaJWK1zoV1-WU{M3B|~Yrz~yt0{1b@)!F$3ZW!bp^ z1=t%+dV9@*0^JE}7tz2Q+z2RXsonKn&xcIu9l;8U4lZ#AX#lpbj56M0&k8J2bqyCL zCc-#z>QHE?GsmwLzJ{%E>?-TAbp5^n(h7>Oa28av7O0sEYIqaKdF_igoBkP*`{E2d zh*c=>>9y%L>_QryVBWO2Q_b7A_SBb__u7BPTqt7Rr(g!5u&N6?ka>=oyRCm$uEoU6 zp;JjTe-{gf(wNjx{LaE@A<2 zk&%62z*|)WRV>o#|)IwJcMf3xA;iHOBI%O9pGT z71TD(aPbOE>wt4CY(k z1OM~6F=PIkeJ_WUT^1Su6mPRarGWez@S3u^NG1j0dC*p4X)pUzFkoze} z{;i-#yZgT%>H+W;I5oUm3F0{eDf&cPQ-h3izWQ)Mu7SRmfkU9RB&|Xa2ti(XH?V0I zbPL&KQ)W2zvAcVP^KbAW1?tOR9uC$XIcVHi)PmVn^|la5JihQL2tMx^XP3fstrD3% zlr@l-;C64_lZ6CDCb{sy6hf6A$59G!u{dN9VEI7}eHvnRmCv5vUKrKg_YuN7&}rXf zzxJ(NV}16ud%pd9#dHmlVRJ0v{bm!vh6!yyYB=+T+bqo;2(NME{o7bKJ$*00bRa5xIy%mYMNN1BFw^}iV( zy$^&_2X5Sh^7cQjpN;G<0#}odvOu=kFXwF7tV%<1uJ>G-s5-D-sI^)cLD1ZQU6|dC z(v{Nkk3c+iApfglw{beigZs(YR#x#9q=pSH49tBfDgQ65>wjT)YKlMFyL*yA|H2A@ za?aiQDvRI0P+rUxtCe0D5*V@x--Y!+JO!X8cWe`Y`#or-X7Ll{y(jL=lF9ndD@()> zhQKq+NGZc#N7og(Q^Dzv9SM2_5{J zoJvF#a=gy6tN@&diB7F6D|W|p)`}z%)s25Sj6248b$wuEH>jxZv)T&ue715m`sK-F zjkPLNM)0s^wx~WTaQ_68x9#!@QISYtT+m%*?I#rsi1;!uW&s0cJ0_R3!2J?Ps7j=I zThEz7RU$*)V--OlKwsZzne>slcM^-y#3~TluTa%OC?GJRO428#C(Y4fFuHU9kRGGL zm{H^p_C@Nyp9^eDTdWphEpBLQ-^k`@hnX0e-wg=-Mv>`fPBhsFf1ztB#XUnbv$koz z?qjRuN+%HMjoiu=8cws0WZzIdULD<3c+V5vlsF~uyiM4-h~mK|U6wbWd2f}Tr_yPP zz5o&Nwn``Fz2Gw3_5y*rM+c8)T9JyFBpXY*Z@*wdo;*ru{*gqxjb<#bLp2hH{|D6- 
zF@%Q}_{-2H;ZdeoD|g&THt;9T+Q2&jPD33S~?l(@xv33Ma0dxG9zq7mF z&0NXfY;L7e81L*F3h;?-@xvbGu5rP&ZYxSC`L%VAdP#=0a_wJ*PT2+Z6SDr+pmN(zJ_+5sK4Iz z79n(=^{9K-%({{J%mE8FGug50l- zb$s5wmZo3(LT3uHY%W1>-IDJGmzQy$b9#&HtNw>kj_nIp7OvV=C9PP>D`MSHttvaX z4R5GMerv13Tt{x)2$9S5q-^IkQlVFq6`$pYyM7+v=I?+Z{2xhQ?X#eZ!$fi~3^YQQ zqmE@?+;`A+GRQNqVQu$c0a7{`>z?*#Ggd}?PD{3yyB5J3EKc}69_^ekw|A{J#C=B; zdV_V)q$T1^v&(o{TBYzF0U7oDmn31!3is@{d48>=?X0K6lPTuo)rw(xab}>VALZxe z$L~9ZnGfwXAQS(sd$vBPd;9#E=MJQ|sGbEfK8O=#$a`5K1ub%XEBKcdDX_D3N8L7EC3D+881TgCloH2 zA0l6eWIx3pSLAWb1^s9~@XVN8=L7YVBBk?+dIJ?E=h1CQ7kcYoq#7^;%H;>X9#w#z z7OW)+Ef_N8sCFkh!nd;nwpaO45C;bI0^B)gbIHw+ba$*5hE zz559)H@@!R3;I_Jy$xBG1hAKn?tp;u9e_-dThIVStl)qv8t@ctl4B1b_E|rkJ(xuH zy#W_q3SV`$d@gKVG4cvm)h7_^$MX?AGMA>&lWyB!8o!S$1lJpAK-IB?Bx-ceU0Bro2n`KFC7#)1_TLXU*C zds$355<`XDS?iIvj;@i>DIMe>FYJM~3xi<{`fsG1Y!wjc!>o!#EVQzf6_+Cp#B94` z$l#yGk_gEvq3KC%`~y7Ru8X zkm->H&+pPm^^5m6YW2q>?&r^Mt1y3jsyQwyC}MsD28%JjdcK_rS3N7R0-|;wDBmZV z3OuFayDvmjQg172X;PG5zZBl47W|l~NHFU!1%;9_ImZgW4Bt3k*mS(B-sr=fxf%g@5lq+H0 z%z}Wnboa~bXeWH&Yw6$BOPYe8K`U<)G+XwX0e4znG5oIjK};7@_F+#>0e3Nv+V)fR z!v>5SN`i~OF zG010M_`$<-@|MJd=PA%u)^!^6ul7@>6d6unugR&|}0&Vsze(dgSfg1CdoxWJG zNcYAJh5A7MHWm7fIPcFnwes&=_ko^{6<(k@kHA1vP;0sd0jf3DlQH~8mjhVS`tEe` z_i{y@s=^U|yMVuS>qu(mqh*CF1OzkT$kEUSaHRKxCDj`l3Z-J|z7R*P*3}g)MObP= zq)Wtf#k>a;^ItFoA0e#+2un7VkX3e74^66Y)(sCX#35zyA~T8^**eTv@Lq{=5s+$g zQPwy2wLt|(zXS#Hp=eH1E&7?0ne9x(IYo0#GG^E**ss>;e~WPP@#g+L`8n|C&)rx= z^PFPcjl810ka*r-v!>H^v*x9VSMn-`a?W?eCZ`u2&kvn%K!rJydmR$c3!AbskuCQe zB}V<$uFAuddsJ}LS77dPt@1Y}&t~-?{h+9K@ zbxF^PvBym(mN4X4kJ2-m)W?%Vb^ zPCkpC0(ObH4IRQ?gELcGy2p;|CM{OTPlVIz%&ZvDiP1Y`?=-UWQS%UGlFQ%9GW{UKTrZp!{?Y@Aj-%nl6;5uqmaR9&x6GOHE|)0WPBlSo|Rm1cV21C zPBg-aQu9ZpXpf%c=@NgkZS8DY-a`Iv==-_RRGMZGg%s1XmeFdMRU=KYPsDS&%($!I zSmGB++1z4%pPVH(OF~!$Z^85$_ZR)pjxR^zn;z}Kf`Qbxry%fbf@ckgN_h6vbSq4Y z%0%BQ5%I7)6l|Ny$=ng_5rw zXpXzeB&qBXd|mzx8T(&JEAA6_s?ynGMAqouW@PF;BpY`uDL?weV}xxblD)-}YG)B| ztV7A@a0mD~_7BU5gwKz7fBO0TQXs91>S%+te++tPy6ve}@xemHiF?rkbNG|!h;>8E zT(|?_=@b&Lqk7Rp?=|Hq`S-<_V+a;+Z!##fZ9onjjticO)04GmB^)E0qHsQ#ENqoX zqtaWaXapQVNXDv2O1z8^^*9IuafXfe%*r>GReVNTjOeijfxGlO&oiXVt41A+$mtE2 z`i7tmX$j`YnK+&~ma!sh$(Yu?cKQ0TFz*AMNld{H#_;;19IQed$CxU7yBuf>|W5UAXMg^r`wZ`Z`wmYi$u4@}C}? 
zyhD`HY~q?}PL--N0jtO5ZLYZ}w*q#~EIBHBNILLK(5GwPQsCvoMp(cLDoT?jul6jdrmH#@tYW@fl7b-lB=XDVQ9;XmnCvZjr%w>nzR z!|4-VlgTC_(s59|3*$}?_h!o7?u(e=H^aSeOPN`u(`y`)$6dx&cSBy9WaZ1(?{;m- zMON2Am)QUE>vvxPf>%eT(?44R_QBJW+x+%2*sjMdqW?fuUGeHH5JMC|mjm>^>DxW> zY3%s--)DaST;;7s8UIBMeYW}hAO?I_89ZEn5<*3ryd@95SPpZ}`mM4sSu zt9e4sk{5Njhp1CYWB|44J~5L=RIA%G9dfd!G*%HPR?K@V+D$r?boy$a3rg`Qd^BVs ze(mCwd=LIiRB6#8bido(so8(Mp8Yq?06mskiWt+(0BA%WzjY9mpdy+QdhtGpLT(eB zP^?-DLExB917av9&Hw}@q<{dC@JVyTh%f9yS|Fivk{}`b&d~fns+R1IY6yyjJX-1n z*?aP4zt6~{|6dOp0(~<1F8mx;6?G5QT`!O4So=6vW!VQBdBRLg>}K;1-wmsIU3`MK z_Lt^CJO2=9zWyWU7w)^^EseL?($5?)e6eTh_XxvMdcy86ls2Ca zAicSM$3zo!j+?I;G+TFaxn9zYcK+YuKWpw(@5&w#bE{eKe@lCX3^%M!^nD}B%WI#F zN8a%hJX81;ulzy@pAU%OItrBAu6CZz2X*`Q^z474Y&N{*eD3NkayaG21GVIp9O>eN z$jEY?;VGb<#(5FJxf~~8{br*-zQo3H?B-XvONcTQx~d@he$2cyE!hb9LhmrB@IL4> z!4P^8%Wyz6e$XB$z9Teigi2nv<(|H;zHl$dJQ zGai6(L+K4hXe4-r-!W69D_Bmyv^}q(NPf%tIL0jKZR-uTt#i2UST-dp>+XRzhSG=a z*LI>c1{_{y(T9xesC1cDBCT*$G}2NI$i{t0<2J^>(E-_%Bx?eoEN{3c$la0F7B?mB zMtg(LQDHPim=EpB!LgGf0`?eqrE~wzTasFgV~E8~;;VCUbE48HMN5fZm@dez1-8LG zqKI81N|p*)wYP7Gg`)NK>cY};M>U~a$j#O9nfEH0Y#B4SG1eTxzJDkGeWC3n6b#nf zlF6*@(1iZeC5H+v??Iytl~M0d`w^>hOHxe@*Ar``Zwrqq&S)+orJA$?RJ!o|^q{nx(kd$L&_QrqXev zcurbtR+|09sf$uGJmg=Q^Q^71b8$O!Fi~Y)f~Yg46**p!R*}5PmbJvrx@%NLG32vd z(?k?3*tUfs<-f6hl_B5_1)N9>yZ%EBubLtAAjIc_$i|fJj;MgDnBPYbM*NmAgCo6c zj{F!BjSx)}YaD!jMChCwvPYQ=*t-IoJ52j2{&CRs!QiNJUMtMGT{{5-2r>7l+);^^{lL_T3S_|)a*Gl)a=6&#axrTkf zA>`G>{cF4l%=lo*D92?O8}6j<3aim9pAEcx3&!t14stCffehJsoGNhYrZLq0z!7a^r`1bxo ziq~dCi2obkeE3CV?eVU(RM&#TK$xcSm2?!qbkk}-8-fL5z1^4)oXx}T(3l(qVSpgHE28!-T6@H>c#z%T) z^c_jumF+(lk?!RGHA0w65&(%trG7yMZ)CtAwD6eDh$iwH<2*jP#zJ*^=DP{37DGwf zJByUaupDd3RVBZ6wOt4A8Tl|2#I!^FAGbu9TLbr1XPpY)mw8ewWIQ5mDTJ93&R=UA zD{tjR!qO6f>4V?c-YvJeC%2hZ0|mKQN7dQiq$$YgGOfd_GfKb5zmA?Z%SH?#mb zG;y-I5W1#y9%u~nRD?*9w=FH9=DlLl3B6Oobun|3i) zYrj(-oY?3&p!oXe^VYGmJ;76{b{gbKLIK&Eu*ShP4`x`fDXVK>Hg>5>gdvQz!}uU0 zvdish#Z%!H949|s#7NU}Wl&A)dBRpxTR8AHudLugkvvwgXOCgG

YFH@@N#rSjPCXHiP|vRkrbPWs$WkoVaLvGJgcEs<^4Rs+j2W_v^0(% zJY@Nz()Tk9Vcof==8pSzQwgRw1M5K-Il-dW_8_2QdWJ_-ZQQEG_8>o<*!VOb3HiqRH*dCO^5syg11o> znhp&CsA6*8t}(XnCGKfiN@%JYYkvgBTza1Y>pkW$s5XYBx9>|eru&X%W8}J}{O9I7 z2OWOiqrgfwS&smgRxc5U$;KuR0>m+DC~I(vIeqobr_%DAu;q0h2l3ax9a zi`15vdbMSYIeB{^8WqOx(gg+b5qzcaN7olR1l(4KcTtVhES6nxJv4mcc1cKcWR>}8 zKQ1L3VP{RCcx>qnwnQ?12fOorbB7eh_v z$i`zwl(8C-c9r7gVsm&Wj0 z0ReBaDq}006#15<@nQZ@1j^@katOxufGj2{mf5cSY5`^Gyn6=4XamO2E=pKboONow zcUP2phBGhvdzAjjL|c0(AyL-Uqmt*QaMtg7{^`lpwrP78@vvYiz8Q(YgqX?w$sD7c z`W9-s*5)vw5_$m4JwFkVt95-+a_kfeTd)UEdp z!eZuVkZDpd#`0%en~5Hw%3wQtF$;c1LIdV&n+!nTwy@ zeQ@yLjK~DcaAUPm&9&A2uI8CJiSkS<$7^$n4i^dPyg2k9o92xM54* z_--wdSi75O%A}|Ui3hiiT{xZqAyWn@=(_0u3)ToCZR8)p#7z6WK+i2##ytHb-FCi9 zY;CMjtj2Uz2O?`swSwrx{t<0Jyaho^WqzXzb&%vN8W6Z}il4w@ZpRL0n7#}(S>%nL zWq{)kUNp&l3YLrpQe?smU;b7KyCVS^Vc=vXPv~X2;fRjk!Tfby^Iuuj-SE`LbGy5m zH2u=YI=Fu5IqHu>PkU3eMMHz|H-~dEg-Y&*P@ER2<-BIJ-rT_%%Rn;?>y!vNCUI^r$o}}(4OtX= zt(ME*M-C@+@`*Cj@cz7%I#eZzq|()>p`666@LAZBe$z1dJt)|jB04g`R>u@bAWpFU z-^3p%V7NO7-YjCNro(N+=}nKd8T$)QEAUid8*2s2$nf&Q{Y(8Jdb&u^gZ z%S+F8U&9O^hVDz#42fj>q~h+sQWD{rV%ljOS9fN-Dn(-U2w?Okr*^oqR; zLcLUYHdhwdq3}{wA+yC$QEK2tvt*bXvP2Jkc?vi?wT6T1lJf=dyI?9T7p%eGBw6y#@Kz@l|rKYP$YfV|=qzaidhRsT4%= Q&w~wWmfVW~ynzA!9}_XaDF6Tf literal 0 HcmV?d00001 diff --git a/charts/lighthouse/values.yaml b/charts/lighthouse/values.yaml index 54f43949c..fb1d7b586 100644 --- a/charts/lighthouse/values.yaml +++ b/charts/lighthouse/values.yaml @@ -1,80 +1,57 @@ git: # git.kind -- Git SCM provider (`github`, `gitlab`, `stash`) kind: github - # git.server -- Git server URL server: "" - # lighthouseJobNamespace -- Namespace where `LighthouseJob`s and `Pod`s are created # @default -- Deployment namespace lighthouseJobNamespace: "" - githubApp: # githubApp.enabled -- Enables GitHub app authentication enabled: false - # githubApp.username -- GitHub app user name - username: "jenkins-x[bot]" - + username: "jenkins-x[bot]" # user -- Git user name (used when GitHub app authentication is not enabled) user: "" - # oauthToken -- Git token (used when GitHub app authentication is not enabled) oauthToken: "" - # oauthSecretName -- Existing Git token secret oauthSecretName: "" - # oauthTokenVolumeMount -- Mount Git token as a volume instead of using an environment variable Secret reference (used when GitHub app authentication is not enabled) oauthTokenVolumeMount: enabled: false - # hmacToken -- Secret used for webhooks hmacToken: "" - # hmacSecretName -- Existing hmac secret to use for webhooks hmacSecretName: "" - # hmacTokenEnabled -- Enables the use of a hmac token. 
This should always be enabled if possible - though some git providers don't support it such as bitbucket cloud hmacTokenEnabled: true - # hmacTokenVolumeMount -- Mount hmac token as a volume instead of using an environment variable Secret reference hmacTokenVolumeMount: enabled: false - # logFormat -- Log format either json or stackdriver logFormat: "json" - # logService -- The name of the service registered with logging logService: "" - # logStackSkip -- Comma separated stack frames to skip from the log logStackSkip: "" - # scope -- limit permissions to namespace privileges scope: "cluster" - cluster: crds: # cluster.crds.create -- Create custom resource definitions create: true - image: # image.parentRepository -- Docker registry to pull images from parentRepository: ghcr.io/jenkins-x - # image.tag -- Docker images tag # the following tag is latest on the main branch, it's a specific version on a git tag - tag: latest - + tag: 1.13.2 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent - # env -- Environment variables env: JX_DEFAULT_IMAGE: "" - - externalPlugins: - name: cd-indicators requiredResources: @@ -86,392 +63,287 @@ externalPlugins: - kind: Service namespace: jx name: lighthouse-webui-plugin - gcJobs: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # gcJobs.maxAge -- Max age from which `LighthouseJob`s will be deleted maxAge: 168h - # gcJobs.schedule -- Cron expression to periodically delete `LighthouseJob`s schedule: "0/30 * * * *" - # gcJobs.failedJobsHistoryLimit -- Drives the failed jobs history limit failedJobsHistoryLimit: 1 - # gcJobs.successfulJobsHistoryLimit -- Drives the successful jobs history limit successfulJobsHistoryLimit: 3 - # gcJobs.concurrencyPolicy -- Drives the job's concurrency policy concurrencyPolicy: Forbid - # gcJobs.backoffLimit -- Drives the job's backoff limit backoffLimit: 6 - image: # gcJobs.image.repository -- Template for computing the gc job docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-gc-jobs" - # gcJobs.image.tag -- Template for computing the gc job docker image tag tag: "{{ .Values.image.tag }}" - # gcJobs.image.pullPolicy -- Template for computing the gc job docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - webhooks: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # webhooks.replicaCount -- Number of replicas replicaCount: 1 - # webhooks.terminationGracePeriodSeconds -- Termination grace period for webhooks pods terminationGracePeriodSeconds: 180 - image: # webhooks.image.repository -- Template for computing the webhooks controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-webhooks" - # webhooks.image.tag -- Template for computing the webhooks controller docker image tag tag: "{{ .Values.image.tag }}" - # webhooks.image.pullPolicy -- Template for computing the webhooks controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - - # webhooks.labels -- allow optional labels to be added to the webhook deployment labels: {} podLabels: {} - # webhooks.podAnnotations -- Annotations applied to the webhooks pods podAnnotations: {} - # webhooks.serviceName -- Allows overriding the service name, this is here for compatibility reasons, regular users should clear this out serviceName: hook - # webhooks.service -- Service settings for the webhooks controller service: type: ClusterIP externalPort: 80 internalPort: 8080 annotations: 
{} - resources: # webhooks.resources.limits -- Resource limits applied to the webhooks pods limits: cpu: 100m # may require more memory to perform the initial 'git clone' cmd for big repositories memory: 512Mi - # webhooks.resources.requests -- Resource requests applied to the webhooks pods requests: cpu: 80m memory: 128Mi - # webhooks.probe -- Liveness and readiness probes settings probe: path: / - # webhooks.livenessProbe -- Liveness probe configuration livenessProbe: initialDelaySeconds: 60 periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.readinessProbe -- Readiness probe configuration readinessProbe: periodSeconds: 10 successThreshold: 1 timeoutSeconds: 1 - # webhooks.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the webhooks pods nodeSelector: {} - # webhooks.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the webhooks pods affinity: {} - # webhooks.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the webhooks pods tolerations: [] - ingress: # webhooks.ingress.enabled -- Enable webhooks ingress enabled: false - # webhooks.ingress.annotations -- Webhooks ingress annotations annotations: {} - # webhooks.ingress.ingressClassName -- Webhooks ingress ingressClassName ingressClassName: null - # webhooks.ingress.hosts -- Webhooks ingress host names hosts: [] - tls: # webhooks.ingress.tls.enabled -- Enable webhooks ingress tls enabled: false # webhooks.ingress.tls.secretName -- Specify webhooks ingress tls secretName secretName: "" - # webhooks.customDeploymentTriggerCommand -- deployments can configure the ability to allow custom lighthouse triggers # using their own unique chat prefix, for example extending the default `/test` trigger prefix let them specify # `customDeploymentTriggerPrefix: foo` which means they can also use their own custom trigger /foo mycoolthing customDeploymentTriggerCommand: "" - foghorn: # logLevel -- The logging level: trace, debug, info, warn, error, fatal logLevel: "info" - # foghorn.replicaCount -- Number of replicas replicaCount: 1 - # foghorn.terminationGracePeriodSeconds -- Termination grace period for foghorn pods terminationGracePeriodSeconds: 180 - image: # foghorn.image.repository -- Template for computing the foghorn controller docker image repository repository: "{{ .Values.image.parentRepository }}/lighthouse-foghorn" - # foghorn.image.tag -- Template for computing the foghorn controller docker image tag tag: "{{ .Values.image.tag }}" - # foghorn.image.pullPolicy -- Template for computing the foghorn controller docker image pull policy pullPolicy: "{{ .Values.image.pullPolicy }}" - resources: # foghorn.resources.limits -- Resource limits applied to the foghorn pods limits: cpu: 100m memory: 256Mi - # foghorn.resources.requests -- Resource requests applied to the foghorn pods requests: cpu: 80m memory: 128Mi - # foghorn.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the foghorn pods nodeSelector: {} - # foghorn.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the foghorn pods affinity: {} - # foghorn.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to 
-
-
 tektoncontroller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # tektoncontroller.dashboardURL -- the dashboard URL (e.g. Tekton dashboard)
   dashboardURL: ''
   # tektoncontroller.dashboardTemplate -- Go template expression for URLs in the dashboard if not using Tekton dashboard
   dashboardTemplate: ''
-
   # tektoncontroller.replicaCount -- Number of replicas
   replicaCount: 1
-
   # tektoncontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods
   terminationGracePeriodSeconds: 180
-
   image:
     # tektoncontroller.image.repository -- Template for computing the tekton controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-tekton-controller"
-
     # tektoncontroller.image.tag -- Template for computing the tekton controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # tektoncontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # tektoncontroller.podAnnotations -- Annotations applied to the tekton controller pods
   podAnnotations: {}
-
   # tektoncontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods
   nodeSelector: {}
-
   # tektoncontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods
   affinity: {}
-
   # tektoncontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods
   tolerations: []
-
   resources:
     # tektoncontroller.resources.limits -- Resource limits applied to the tekton controller pods
     limits:
       cpu: 100m
       memory: 256Mi
-
     # tektoncontroller.resources.requests -- Resource requests applied to the tekton controller pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # tektoncontroller.service -- Service settings for the tekton controller
   service:
     annotations: {}
-
 jenkinscontroller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # jenkinscontroller.jenkinsURL -- The URL of the Jenkins instance
   jenkinsURL:
-
   # jenkinscontroller.jenkinsUser -- The username for the Jenkins user
   jenkinsUser:
-
   # jenkinscontroller.jenkinsToken -- The token for authenticating the Jenkins user
   jenkinsToken:
-
   # jenkinscontroller.terminationGracePeriodSeconds -- Termination grace period for tekton controller pods
   terminationGracePeriodSeconds: 180
-
   image:
     # jenkinscontroller.image.repository -- Template for computing the Jenkins controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-jenkins-controller"
-
     # jenkinscontroller.image.tag -- Template for computing the tekton controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # jenkinscontroller.image.pullPolicy -- Template for computing the tekton controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # jenkinscontroller.podAnnotations -- Annotations applied to the tekton controller pods
   podAnnotations: {}
-
   # jenkinscontroller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the tekton controller pods
   nodeSelector: {}
-
   # jenkinscontroller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the tekton controller pods
   affinity: {}
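If the Jenkins engine were used instead of the default jx engine, the `jenkinscontroller` values above would have to be filled in; the sketch below uses placeholder connection details, and the token would normally be injected via `--set` or an existing secret rather than committed to a values file:

```yaml
# hypothetical override switching Lighthouse to the Jenkins engine
engines:
  jx: false
  jenkins: true
jenkinscontroller:
  jenkinsURL: https://jenkins.example.com   # placeholder Jenkins URL
  jenkinsUser: lighthouse-bot               # placeholder Jenkins username
  jenkinsToken: ""                          # placeholder; supply the real token at deploy time
```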
-
   # jenkinscontroller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the tekton controller pods
   tolerations: []
-
   resources:
     # jenkinscontroller.resources.limits -- Resource limits applied to the tekton controller pods
     limits:
       cpu: 100m
       memory: 256Mi
-
     # jenkinscontroller.resources.requests -- Resource requests applied to the tekton controller pods
     requests:
       cpu: 80m
       memory: 128Mi
-
   # jenkinscontroller.service -- Service settings for the tekton controller
   service:
     annotations: {}
-
 keeper:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # keeper.statusContextLabel -- Label used to report status to git provider
   statusContextLabel: "Lighthouse Merge Status"
-
   # keeper.replicaCount -- Number of replicas
   replicaCount: 1
-
   # keeper.terminationGracePeriodSeconds -- Termination grace period for keeper pods
   terminationGracePeriodSeconds: 30
-
   image:
     # keeper.image.repository -- Template for computing the keeper controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-keeper"
-
     # keeper.image.tag -- Template for computing the keeper controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # keeper.image.pullPolicy -- Template for computing the keeper controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
-
   # keeper.podAnnotations -- Annotations applied to the keeper pods
   podAnnotations: {}
-
   # keeper.env -- Lets you define keeper specific environment variables
   env: {}
-
   # keeper.service -- Service settings for the keeper controller
   service:
     type: ClusterIP
     externalPort: 80
     internalPort: 8888
-
   resources:
     # keeper.resources.limits -- Resource limits applied to the keeper pods
     limits:
       cpu: 400m
       memory: 512Mi
-
     # keeper.resources.requests -- Resource requests applied to the keeper pods
     requests:
       cpu: 100m
       memory: 128Mi
-
   # keeper.probe -- Liveness and readiness probes settings
   probe:
     path: /
-
   # keeper.livenessProbe -- Liveness probe configuration
   livenessProbe:
     initialDelaySeconds: 120
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # keeper.readinessProbe -- Readiness probe configuration
   readinessProbe:
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   datadog:
     # keeper.datadog.enabled -- Enables datadog
     enabled: "true"
-
   # keeper.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the keeper pods
   nodeSelector: {}
-
   # keeper.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the keeper pods
   affinity: {}
-
   # keeper.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the keeper pods
   tolerations: []
-
 poller:
   # logLevel -- The logging level: trace, debug, info, warn, error, fatal
   logLevel: "info"
-
   # poller.enabled -- Whether to enable or disable the poller component
   enabled: false
-
   # poller.replicaCount -- Number of replicas
   replicaCount: 1
-
   # poller.terminationGracePeriodSeconds -- Termination grace period for poller pods
   terminationGracePeriodSeconds: 30
-
   image:
     # poller.image.repository -- Template for computing the poller controller docker image repository
     repository: "{{ .Values.image.parentRepository }}/lighthouse-poller"
-
     # poller.image.tag -- Template for computing the poller controller docker image tag
     tag: "{{ .Values.image.tag }}"
-
     # poller.image.pullPolicy -- Template for computing the poller controller docker image pull policy
     pullPolicy: "{{ .Values.image.pullPolicy }}"
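The poller above is disabled by default and is typically only enabled when webhook delivery from the git server is not an option; a minimal sketch of turning it on (the poll period is illustrative, the chart default documented below is 20s):

```yaml
# hypothetical override enabling the poller component
poller:
  enabled: true
  env:
    POLL_PERIOD: 60s   # illustrative; poll less often than the 20s default
```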
-
   # poller.podAnnotations -- Annotations applied to the poller pods
   podAnnotations: {}
-
   # poller.env -- Lets you define poller specific environment variables
   env:
     # poller.env.POLL_PERIOD the default time period between polling releases and pull requests
     POLL_PERIOD: 20s
-
     # poller.env.POLL_RELEASE_PERIOD the time period between polling releases
     # POLL_RELEASE_PERIOD: 20s
@@ -480,77 +352,58 @@ poller:
     # poller.env.POLL_HOOK_ENDPOINT the hook service endpoint to post webhooks to
     POLL_HOOK_ENDPOINT: http://hook/hook/poll
-
   # poller.contextMatchPattern -- Regex pattern to use to match commit status context
   contextMatchPattern: ""
-
   # poller.requireReleaseSuccess -- Keep polling releases until the most recent commit status is successful
   requireReleaseSuccess: false
-
   resources:
     # poller.resources.limits -- Resource limits applied to the poller pods
     limits:
       cpu: 400m
       memory: 512Mi
-
     # poller.resources.requests -- Resource requests applied to the poller pods
     requests:
       cpu: 100m
       memory: 128Mi
-
   # poller.probe -- Liveness and readiness probes settings
   probe:
     path: /
-
   # keeper.internalPort -- The internal port used to view metrics etc
   internalPort: 8888
-
   # poller.livenessProbe -- Liveness probe configuration
   livenessProbe:
     initialDelaySeconds: 120
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   # poller.readinessProbe -- Readiness probe configuration
   readinessProbe:
     periodSeconds: 10
     successThreshold: 1
     timeoutSeconds: 1
-
   datadog:
     # poller.datadog.enabled -- Enables datadog
     enabled: "true"
-
   # poller.nodeSelector -- [Node selector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector) applied to the poller pods
   nodeSelector: {}
-
   # poller.affinity -- [Affinity rules](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) applied to the poller pods
   affinity: {}
-
   # poller.tolerations -- [Tolerations](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) applied to the poller pods
   tolerations: []
-
 engines:
   # engines.jx -- Enables the jx engine
   jx: true
-
   # engines.tekton -- Enables the tekton engine
   tekton: false
-
   # engines.jenkins -- Enables the Jenkins engine
   jenkins: false
-
 configMaps:
   # configMaps.create -- Enables creation of `config.yaml` and `plugins.yaml` config maps
   create: false
-
   # configMaps.config -- Raw `config.yaml` content
   config: null
-
   # configMaps.plugins -- Raw `plugins.yaml` content
   plugins: null
-
   # configMaps.configUpdater -- Settings used to configure the `config-updater` plugin
   configUpdater:
     orgAndRepo: ""