From d3cbc6447630129ab02d2c77b9418e258ed5a846 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jan 2022 14:16:56 +0200 Subject: [PATCH 001/262] Bump golang from 1.17.5 to 1.17.6 (#1079) Bumps golang from 1.17.5 to 1.17.6. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 1108d4fe3..ded9d9147 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 AS build +FROM golang:1.17.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 911996894064425330f590f3b394b9caa9e1a006 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Wed, 19 Jan 2022 16:13:23 +0800 Subject: [PATCH 002/262] Remove broken badges and add link for some badges (#1083) * Remove broken badges and add link for some badges * update badges in docs --- README.md | 7 +++---- docs/index.md | 7 +++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 9650439e4..88b7b59fc 100644 --- a/README.md +++ b/README.md @@ -1,17 +1,16 @@ [![GitHub Release][release-img]][release] -![Downloads][download] -![Docker Pulls][docker-pull] +[![Downloads][download]][release] +[![Docker Pulls][docker-pull]][docker] [![Go Report Card][report-card-img]][report-card] [![Build Status](https://github.com/aquasecurity/kube-bench/workflows/Build/badge.svg?branch=main)](https://github.com/aquasecurity/kube-bench/actions) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/aquasecurity/kube-bench/blob/main/LICENSE) -[![Docker image](https://images.microbadger.com/badges/image/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench "Get your own image badge on microbadger.com") -[![Source commit](https://images.microbadger.com/badges/commit/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench) [![Coverage Status][cov-img]][cov] [download]: https://img.shields.io/github/downloads/aquasecurity/kube-bench/total?logo=github [release-img]: https://img.shields.io/github/release/aquasecurity/kube-bench.svg?logo=github [release]: https://github.com/aquasecurity/kube-bench/releases [docker-pull]: https://img.shields.io/docker/pulls/aquasec/kube-bench?logo=docker&label=docker%20pulls%20%2F%20kube-bench +[docker]: https://hub.docker.com/r/aquasec/kube-bench [cov-img]: https://codecov.io/github/aquasecurity/kube-bench/branch/main/graph/badge.svg [cov]: https://codecov.io/github/aquasecurity/kube-bench [report-card-img]: https://goreportcard.com/badge/github.com/aquasecurity/kube-bench diff --git a/docs/index.md b/docs/index.md index 815db57d0..3aa6faaf5 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,6 +2,7 @@ [release-img]: https://img.shields.io/github/release/aquasecurity/kube-bench.svg?logo=github [release]: https://github.com/aquasecurity/kube-bench/releases [docker-pull]: https://img.shields.io/docker/pulls/aquasec/kube-bench?logo=docker&label=docker%20pulls%20%2F%20kube-bench +[docker]: https://hub.docker.com/r/aquasec/kube-bench [cov-img]: https://codecov.io/github/aquasecurity/kube-bench/branch/main/graph/badge.svg [cov]: https://codecov.io/github/aquasecurity/kube-bench [report-card-img]: 
https://goreportcard.com/badge/github.com/aquasecurity/kube-bench @@ -9,13 +10,11 @@ ![Kube-bench Logo](images/kube-bench.jpg) [![GitHub Release][release-img]][release] -![Downloads][download] -![Docker Pulls][docker-pull] +[![Downloads][download]][release] +[![Docker Pulls][docker-pull]][docker] [![Go Report Card][report-card-img]][report-card] [![Build Status](https://github.com/aquasecurity/kube-bench/workflows/Build/badge.svg?branch=main)](https://github.com/aquasecurity/kube-bench/actions) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/aquasecurity/kube-bench/blob/main/LICENSE) -[![Docker image](https://images.microbadger.com/badges/image/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench "Get your own image badge on microbadger.com") -[![Source commit](https://images.microbadger.com/badges/commit/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench) [![Coverage Status][cov-img]][cov] From d232ae0fb9de0ec67a1213a681033c9d4b3da129 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 23 Jan 2022 15:40:59 +0800 Subject: [PATCH 003/262] Fix the `--exit-code` flag doesn't work when run with subcommand (#1084) --- cmd/master.go | 2 ++ cmd/node.go | 2 ++ cmd/root.go | 3 +-- cmd/run.go | 6 ++++-- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/cmd/master.go b/cmd/master.go index 99d9cb38a..590d9786b 100644 --- a/cmd/master.go +++ b/cmd/master.go @@ -16,6 +16,7 @@ package cmd import ( "fmt" + "os" "github.com/aquasecurity/kube-bench/check" "github.com/spf13/cobra" @@ -36,6 +37,7 @@ var masterCmd = &cobra.Command{ filename := loadConfig(check.MASTER, bv) runChecks(check.MASTER, filename, detecetedKubeVersion) writeOutput(controlsCollection) + os.Exit(exitCodeSelection(controlsCollection)) }, Deprecated: "this command will be retired soon. Please use the `run` command with `--targets=master` instead.", } diff --git a/cmd/node.go b/cmd/node.go index 5672297d2..63e8e289e 100644 --- a/cmd/node.go +++ b/cmd/node.go @@ -16,6 +16,7 @@ package cmd import ( "fmt" + "os" "github.com/aquasecurity/kube-bench/check" "github.com/spf13/cobra" @@ -36,6 +37,7 @@ var nodeCmd = &cobra.Command{ filename := loadConfig(check.NODE, bv) runChecks(check.NODE, filename, detecetedKubeVersion) writeOutput(controlsCollection) + os.Exit(exitCodeSelection(controlsCollection)) }, Deprecated: "this command will be retired soon. 
Please use the `run` command with `--targets=node` instead.", } diff --git a/cmd/root.go b/cmd/root.go index 785d021f3..ebfaa675d 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -136,8 +136,7 @@ var RootCmd = &cobra.Command{ } writeOutput(controlsCollection) - exitCode := exitCodeSelection(controlsCollection) - os.Exit(exitCode) + os.Exit(exitCodeSelection(controlsCollection)) }, } diff --git a/cmd/run.go b/cmd/run.go index db92fe73c..8dd392275 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -54,13 +54,15 @@ var runCmd = &cobra.Command{ path := filepath.Join(cfgDir, bv) err = mergeConfig(path) if err != nil { - fmt.Printf("Error in mergeConfig: %v\n", err) + exitWithError(fmt.Errorf("Error in mergeConfig: %v\n", err)) } err = run(targets, bv) if err != nil { - fmt.Printf("Error in run: %v\n", err) + exitWithError(fmt.Errorf("Error in run: %v\n", err)) } + + os.Exit(exitCodeSelection(controlsCollection)) }, } From c0349f0ca80527b646501cc62d455465869948f6 Mon Sep 17 00:00:00 2001 From: manasiprabhavalkar <61792275+manasiprabhavalkar@users.noreply.github.com> Date: Sat, 29 Jan 2022 06:48:28 -0500 Subject: [PATCH 004/262] Adding support for new cpu architecture (ppc64le) (#1078) * Update makefile Support ppc64le (IBM Power) architecture * Update .goreleaser.yml Added support for ppc64le cpu arch * Update publish.yml Added support for ppc64le cpu arch Co-authored-by: Yoav Rotem --- .github/workflows/publish.yml | 2 +- .goreleaser.yml | 1 + makefile | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 10abab815..b5c56dce7 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -52,7 +52,7 @@ jobs: uses: docker/build-push-action@v2 with: context: . - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64,linux/arm64,linux/ppc64le builder: ${{ steps.buildx.outputs.name }} push: true build-args: | diff --git a/.goreleaser.yml b/.goreleaser.yml index e60c5a80c..21966f9fb 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -12,6 +12,7 @@ builds: - amd64 - arm - arm64 + - ppc64le goarm: - 6 - 7 diff --git a/makefile b/makefile index 5945e1a47..d2f3c3951 100644 --- a/makefile +++ b/makefile @@ -7,7 +7,7 @@ IMAGE_NAME ?= $(DOCKER_ORG)/$(BINARY):$(VERSION) GOOS ?= linux BUILD_OS := linux uname := $(shell uname -s) -BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm +BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ From e73c07d86f9bd4dacfd42db41c3b50ff48af7240 Mon Sep 17 00:00:00 2001 From: Shubham Deshmukh Date: Sun, 30 Jan 2022 14:08:03 +0530 Subject: [PATCH 005/262] Update kube bench docker image tag to v0.6.6 (#1085) Co-authored-by: Yoav Rotem --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 739fb9b78..bf1adf7a5 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:0.6.3 + image: aquasec/kube-bench:v0.6.6 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 3b4e87cbc0471b53b5aad319ecd0a4180e4339b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Jan 2022 16:21:43 +0200 Subject: [PATCH 006/262] Bump k8s.io/client-go from 0.23.1 to 0.23.3 (#1087) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.1 to 0.23.3. 
- [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.1...v0.23.3) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index e318d34e2..ba3296877 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.2.2 gorm.io/gorm v1.22.3 - k8s.io/client-go v0.23.1 + k8s.io/client-go v0.23.3 ) diff --git a/go.sum b/go.sum index 9f8ef6d77..bd2ba3b3f 100644 --- a/go.sum +++ b/go.sum @@ -991,21 +991,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= -k8s.io/client-go v0.23.1 h1:Ma4Fhf/p07Nmj9yAB1H7UwbFHEBrSPg8lviR24U2GiQ= -k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= +k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From 9f92e81361a1d4b883e81c42e00e588e85db9a11 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Tue, 15 Feb 2022 01:05:48 +0800 Subject: [PATCH 007/262] Update 
docs about the --outputfile flag (#1094) --- cmd/root.go | 2 +- docs/flags-and-commands.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index ebfaa675d..7d2e65a8b 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -172,7 +172,7 @@ func init() { RootCmd.PersistentFlags().BoolVar(&filterOpts.Unscored, "unscored", true, "Run the unscored CIS checks") RootCmd.PersistentFlags().StringVar(&skipIds, "skip", "", "List of comma separated values of checks to be skipped") RootCmd.PersistentFlags().BoolVar(&includeTestOutput, "include-test-output", false, "Prints the actual result when test fails") - RootCmd.PersistentFlags().StringVar(&outputFile, "outputfile", "", "Writes the JSON results to output file") + RootCmd.PersistentFlags().StringVar(&outputFile, "outputfile", "", "Writes the results to output file when run with --json or --junit") RootCmd.PersistentFlags().StringVarP( &filterOpts.CheckList, diff --git a/docs/flags-and-commands.md b/docs/flags-and-commands.md index 27a254462..d00d9ab40 100644 --- a/docs/flags-and-commands.md +++ b/docs/flags-and-commands.md @@ -23,7 +23,7 @@ Flag | Description --noremediations | Disable printing of remediations section to stdout. --noresults | Disable printing of results section to stdout. --nototals | Disable calculating and printing of totals for failed, passed, ... checks across all sections ---outputfile | Writes the JSON results to output file +--outputfile | Writes the results to output file when run with --json or --junit --pgsql | Save the results to PostgreSQL --scored | Run the scored CIS checks (default true) --skip string | List of comma separated values of checks to be skipped From 67f8b338d51eb3a90c0e7b7bd6eb87d578ddcd5b Mon Sep 17 00:00:00 2001 From: Nick Gibbon Date: Tue, 22 Feb 2022 16:10:51 +0000 Subject: [PATCH 008/262] fix: remove master and node Commands (#960) Co-authored-by: Yoav Rotem --- cmd/master.go | 54 --------------------------------------------------- cmd/node.go | 54 --------------------------------------------------- 2 files changed, 108 deletions(-) delete mode 100644 cmd/master.go delete mode 100644 cmd/node.go diff --git a/cmd/master.go b/cmd/master.go deleted file mode 100644 index 590d9786b..000000000 --- a/cmd/master.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright © 2017-2019 Aqua Security Software Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cmd - -import ( - "fmt" - "os" - - "github.com/aquasecurity/kube-bench/check" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// masterCmd represents the master command -var masterCmd = &cobra.Command{ - Use: "master", - Short: "Run Kubernetes benchmark checks from the master.yaml file.", - Long: `Run Kubernetes benchmark checks from the master.yaml file in cfg/.`, - Run: func(cmd *cobra.Command, args []string) { - bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) - if err != nil { - exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err)) - } - - filename := loadConfig(check.MASTER, bv) - runChecks(check.MASTER, filename, detecetedKubeVersion) - writeOutput(controlsCollection) - os.Exit(exitCodeSelection(controlsCollection)) - }, - Deprecated: "this command will be retired soon. Please use the `run` command with `--targets=master` instead.", -} - -func init() { - masterCmd.PersistentFlags().StringVarP(&masterFile, - "file", - "f", - "/master.yaml", - "Alternative YAML file for master checks", - ) - - RootCmd.AddCommand(masterCmd) -} diff --git a/cmd/node.go b/cmd/node.go deleted file mode 100644 index 63e8e289e..000000000 --- a/cmd/node.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright © 2017-2019 Aqua Security Software Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "fmt" - "os" - - "github.com/aquasecurity/kube-bench/check" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// nodeCmd represents the node command -var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Run Kubernetes benchmark checks from the node.yaml file.", - Long: `Run Kubernetes benchmark checks from the node.yaml file in cfg/.`, - Run: func(cmd *cobra.Command, args []string) { - bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) - if err != nil { - exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err)) - } - - filename := loadConfig(check.NODE, bv) - runChecks(check.NODE, filename, detecetedKubeVersion) - writeOutput(controlsCollection) - os.Exit(exitCodeSelection(controlsCollection)) - }, - Deprecated: "this command will be retired soon. 
Please use the `run` command with `--targets=node` instead.", -} - -func init() { - nodeCmd.PersistentFlags().StringVarP(&nodeFile, - "file", - "f", - "/node.yaml", - "Alternative YAML file for node checks", - ) - - RootCmd.AddCommand(nodeCmd) -} From db385c7d09439ef612cdb8df06920634bad7ef6a Mon Sep 17 00:00:00 2001 From: skuethe <56306041+skuethe@users.noreply.github.com> Date: Wed, 23 Feb 2022 08:07:51 +0100 Subject: [PATCH 009/262] chore: add s390x arch (#1097) Signed-off-by: skuethe <56306041+skuethe@users.noreply.github.com> Co-authored-by: Huang Huang --- .github/workflows/publish.yml | 2 +- .goreleaser.yml | 1 + makefile | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index b5c56dce7..5c58c3a35 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -52,7 +52,7 @@ jobs: uses: docker/build-push-action@v2 with: context: . - platforms: linux/amd64,linux/arm64,linux/ppc64le + platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x builder: ${{ steps.buildx.outputs.name }} push: true build-args: | diff --git a/.goreleaser.yml b/.goreleaser.yml index 21966f9fb..fed2273a7 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -13,6 +13,7 @@ builds: - arm - arm64 - ppc64le + - s390x goarm: - 6 - 7 diff --git a/makefile b/makefile index d2f3c3951..c60705692 100644 --- a/makefile +++ b/makefile @@ -7,7 +7,7 @@ IMAGE_NAME ?= $(DOCKER_ORG)/$(BINARY):$(VERSION) GOOS ?= linux BUILD_OS := linux uname := $(shell uname -s) -BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le +BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ From d6c384101854a56880479b925b3e9dc35400577d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 27 Feb 2022 09:15:55 +0200 Subject: [PATCH 010/262] Bump gorm.io/driver/postgres from 1.2.2 to 1.3.1 (#1101) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.2.2 to 1.3.1. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.2.2...v1.3.1) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 4 ++-- go.sum | 29 +++++++++++++++-------------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index ba3296877..750cb84d4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.10.0 github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.2.2 - gorm.io/gorm v1.22.3 + gorm.io/driver/postgres v1.3.1 + gorm.io/gorm v1.23.1 k8s.io/client-go v0.23.3 ) diff --git a/go.sum b/go.sum index bd2ba3b3f..4128ae71c 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU= -github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= +github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -308,29 +308,31 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= +github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs= -github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= +github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod 
h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570= -github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0= +github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= +github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI= -github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas= +github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -979,11 +981,10 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.2.2 h1:Ka9W6feOU+rPM9m007eYLMD4QoZuYGBnQ3Jp0faGSwg= -gorm.io/driver/postgres v1.2.2/go.mod h1:Ik3tK+a3FMp8ORZl29v4b3M0RsgXsaeMXh9s9eVMXco= -gorm.io/gorm v1.22.2/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= -gorm.io/gorm v1.22.3 h1:/JS6z+GStEQvJNW3t1FTwJwG/gZ+A7crFdRqtvG5ehA= -gorm.io/gorm v1.22.3/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= +gorm.io/driver/postgres v1.3.1 h1:Pyv+gg1Gq1IgsLYytj/S2k7ebII3CzEdpqQkPOdH24g= +gorm.io/driver/postgres v1.3.1/go.mod h1:WwvWOuR9unCLpGWCL6Y3JOeBWvbKi6JLhayiVclSZZU= +gorm.io/gorm v1.23.1 h1:aj5IlhDzEPsoIyOPtTRVI+SyaN1u6k613sbt4pwbxG0= +gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From a9bc826dc1064af8b0b71d5e798a78e46a7d7bbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 10:40:35 
+0200 Subject: [PATCH 011/262] Bump github.com/magiconair/properties from 1.8.5 to 1.8.6 (#1103) Bumps [github.com/magiconair/properties](https://github.com/magiconair/properties) from 1.8.5 to 1.8.6. - [Release notes](https://github.com/magiconair/properties/releases) - [Changelog](https://github.com/magiconair/properties/blob/main/CHANGELOG.md) - [Commits](https://github.com/magiconair/properties/compare/v1.8.5...v1.8.6) --- updated-dependencies: - dependency-name: github.com/magiconair/properties dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 750cb84d4..f4e7ef203 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-sdk-go v1.42.14 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/magiconair/properties v1.8.5 + github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.3.0 diff --git a/go.sum b/go.sum index 4128ae71c..e213c066d 100644 --- a/go.sum +++ b/go.sum @@ -363,8 +363,9 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= From 10f1672bfe637016bfb1274291b6102ea20cf291 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 16:35:00 +0200 Subject: [PATCH 012/262] Bump gorm.io/gorm from 1.23.1 to 1.23.2 (#1106) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.1 to 1.23.2. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.1...v1.23.2) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index f4e7ef203..0785ecddb 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.1 - gorm.io/gorm v1.23.1 + gorm.io/gorm v1.23.2 k8s.io/client-go v0.23.3 ) diff --git a/go.sum b/go.sum index e213c066d..bafcee8cd 100644 --- a/go.sum +++ b/go.sum @@ -984,8 +984,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.1 h1:Pyv+gg1Gq1IgsLYytj/S2k7ebII3CzEdpqQkPOdH24g= gorm.io/driver/postgres v1.3.1/go.mod h1:WwvWOuR9unCLpGWCL6Y3JOeBWvbKi6JLhayiVclSZZU= -gorm.io/gorm v1.23.1 h1:aj5IlhDzEPsoIyOPtTRVI+SyaN1u6k613sbt4pwbxG0= gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.2 h1:xmq9QRMWL8HTJyhAUBXy8FqIIQCYESeKfJL4DoGKiWQ= +gorm.io/gorm v1.23.2/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From e2599b1e06e8414072c315c56c014a71ddb1060f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 16:46:34 +0200 Subject: [PATCH 013/262] Bump golang from 1.17.6 to 1.17.7 (#1095) Bumps golang from 1.17.6 to 1.17.7. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Huang Huang Co-authored-by: chenk --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index ded9d9147..6f2b656eb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.6 AS build +FROM golang:1.17.7 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From fb10678a88475d8ef4ae02255ac318a1cff1cf30 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 16:56:28 +0200 Subject: [PATCH 014/262] Bump k8s.io/client-go from 0.23.3 to 0.23.4 (#1100) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.3 to 0.23.4. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.3...v0.23.4) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 0785ecddb..039b07864 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.1 gorm.io/gorm v1.23.2 - k8s.io/client-go v0.23.3 + k8s.io/client-go v0.23.4 ) diff --git a/go.sum b/go.sum index bafcee8cd..995a0d2bc 100644 --- a/go.sum +++ b/go.sum @@ -994,10 +994,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= -k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= +k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= +k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= +k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From 2595727c38cece8978f643056a4059230aef193a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 17:06:06 +0200 Subject: [PATCH 015/262] Bump actions/setup-python from 2 to 3 (#1102) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 3. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/mkdocs-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index 278b6a7cc..f58b7916b 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -20,7 +20,7 @@ jobs: with: fetch-depth: 0 persist-credentials: true - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v3 with: python-version: 3.x - run: | From 1e592d5f41e1032ef77a05f7e39580c494d497ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Mar 2022 17:13:47 +0200 Subject: [PATCH 016/262] Bump github.com/aws/aws-sdk-go from 1.42.14 to 1.43.10 (#1108) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.42.14 to 1.43.10. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.42.14...v1.43.10) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 12 +++++++----- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 039b07864..fdeb8e948 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.42.14 + github.com/aws/aws-sdk-go v1.43.10 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 995a0d2bc..074f7bc62 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.42.14 h1:Eqwjl/cwRY9z0TTU4RGpiElWo9oPzG+Y8r5Thu6Ug5A= -github.com/aws/aws-sdk-go v1.42.14/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go v1.43.10 h1:lFX6gzTBltYBnlJBjd2DWRCmqn2CbTcs6PW99/Dme7k= +github.com/aws/aws-sdk-go v1.43.10/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -619,10 +619,10 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -724,11 +724,13 @@ golang.org/x/sys 
v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 0d9e05ed86a6b92e5bf3df39603fa6e48473825b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Mar 2022 10:59:31 +0200 Subject: [PATCH 017/262] Bump github.com/aws/aws-sdk-go from 1.43.10 to 1.43.12 (#1111) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.10 to 1.43.12. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.10...v1.43.12) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fdeb8e948..ca51850bb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.10 + github.com/aws/aws-sdk-go v1.43.12 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 074f7bc62..795f3ca01 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.10 h1:lFX6gzTBltYBnlJBjd2DWRCmqn2CbTcs6PW99/Dme7k= -github.com/aws/aws-sdk-go v1.43.10/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.12 h1:wOdx6+reSDpUBFEuJDA6edCrojzy8rOtMzhS2rD9+7M= +github.com/aws/aws-sdk-go v1.43.12/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 675515a9b0d8cb5acb1b52c8a8575f702e9aec81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Mar 2022 11:08:52 +0200 Subject: [PATCH 018/262] Bump golang from 1.17.7 to 1.17.8 (#1110) Bumps golang from 1.17.7 to 1.17.8. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 6f2b656eb..9615f5931 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.7 AS build +FROM golang:1.17.8 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 9e5d95eb604194d5120337caff67c8c8579842a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Mar 2022 11:22:17 +0200 Subject: [PATCH 019/262] Bump actions/checkout from 2 to 3 (#1109) Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 8 ++++---- .github/workflows/mkdocs-deploy.yaml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/release.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d05f631db..c8e5f71a7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,7 +28,7 @@ jobs: with: go-version: 1.16 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: yaml-lint uses: ibiqlik/action-yamllint@v3 unit: @@ -40,7 +40,7 @@ jobs: with: go-version: 1.16 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run unit tests run: make tests - name: Upload code coverage @@ -56,7 +56,7 @@ jobs: with: go-version: 1.16 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Setup Kubernetes cluster (KIND) uses: engineerd/setup-kind@v0.5.0 with: @@ -86,7 +86,7 @@ jobs: with: go-version: 1.16 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Dry-run release snapshot uses: goreleaser/goreleaser-action@v2 with: diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index f58b7916b..a55bcd5a2 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Checkout main - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 persist-credentials: true diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5c58c3a35..7b7cbe18a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Check Out Repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 77ff019a2..c88fa8d27 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ jobs: with: go-version: 1.16 - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Run unit tests run: make tests - name: Setup Kubernetes cluster (KIND) From c28e7a796e0e87bfcb85a6e2a28051c9cbd7c84c Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 13 Mar 2022 15:27:25 +0800 Subject: [PATCH 020/262] Fixed typo in policies.yaml (#1113) --- cfg/aks-1.0/policies.yaml | 2 +- cfg/eks-1.0.1/policies.yaml | 2 +- cfg/gke-1.0/policies.yaml | 2 +- cfg/gke-1.2.0/policies.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cfg/aks-1.0/policies.yaml b/cfg/aks-1.0/policies.yaml index 89f48a54b..9cfde1e3f 100644 --- a/cfg/aks-1.0/policies.yaml +++ b/cfg/aks-1.0/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 4.1.4 text: "Minimize access to create pods (Manual)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. 
scored: false diff --git a/cfg/eks-1.0.1/policies.yaml b/cfg/eks-1.0.1/policies.yaml index 3b9aa8b18..cf41ab984 100644 --- a/cfg/eks-1.0.1/policies.yaml +++ b/cfg/eks-1.0.1/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 4.1.4 text: "Minimize access to create pods (Manual)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false diff --git a/cfg/gke-1.0/policies.yaml b/cfg/gke-1.0/policies.yaml index 54ce12bd9..0278ec0b5 100644 --- a/cfg/gke-1.0/policies.yaml +++ b/cfg/gke-1.0/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 5.1.4 text: "Minimize access to create pods (Not Scored)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false diff --git a/cfg/gke-1.2.0/policies.yaml b/cfg/gke-1.2.0/policies.yaml index e1261c0e3..6ecb1ff70 100644 --- a/cfg/gke-1.2.0/policies.yaml +++ b/cfg/gke-1.2.0/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 4.1.4 text: "Minimize access to create pods (Manual)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false From c72c9e9e255381ce85836f2a5a2ff9c9f7dcd819 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Mar 2022 09:11:04 +0200 Subject: [PATCH 021/262] Bump github.com/aws/aws-sdk-go from 1.43.12 to 1.43.18 (#1115) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.12 to 1.43.18. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.12...v1.43.18) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ca51850bb..d13fb1270 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.12 + github.com/aws/aws-sdk-go v1.43.18 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 795f3ca01..88894438b 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.12 h1:wOdx6+reSDpUBFEuJDA6edCrojzy8rOtMzhS2rD9+7M= -github.com/aws/aws-sdk-go v1.43.12/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.18 h1:nwLaIz2m1f7YBEMNyEc6bBB276AIEaGaIQrc2G9h4zY= +github.com/aws/aws-sdk-go v1.43.18/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From f0fb1b6ea2f4477b9f37941a284e6dde9961cbd4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Mar 2022 09:33:58 +0200 Subject: [PATCH 022/262] Bump github.com/spf13/cobra from 1.3.0 to 1.4.0 (#1114) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.3.0 to 1.4.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Changelog](https://github.com/spf13/cobra/blob/master/CHANGELOG.md) - [Commits](https://github.com/spf13/cobra/compare/v1.3.0...v1.4.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d13fb1270..b5d47d819 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.3.0 + github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.10.0 github.com/stretchr/testify v1.7.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 88894438b..de887562d 100644 --- a/go.sum +++ b/go.sum @@ -468,8 +468,8 @@ github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= -github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From 021bb13b05bbe3058a747e404e58b6d98d68aa8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Mar 2022 17:23:08 +0200 Subject: [PATCH 023/262] Bump golang from 1.17.8 to 1.18.0 (#1118) Bumps golang from 1.17.8 to 1.18.0. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9615f5931..f26730957 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.8 AS build +FROM golang:1.18.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 55c5b981cf99e157d68fa19b92a92ce535cd6005 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 09:38:08 +0200 Subject: [PATCH 024/262] Bump alpine from 3.15.0 to 3.15.1 (#1119) Bumps alpine from 3.15.0 to 3.15.1. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index f26730957..4a11ed272 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.15.0 AS run +FROM alpine:3.15.1 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From ccded4277a636eae183168c19d1e8df84142abae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 09:47:42 +0200 Subject: [PATCH 025/262] Bump actions/cache from 2 to 3 (#1120) Bumps [actions/cache](https://github.com/actions/cache) from 2 to 3. - [Release notes](https://github.com/actions/cache/releases) - [Commits](https://github.com/actions/cache/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 7b7cbe18a..0ea849a21 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -22,7 +22,7 @@ jobs: id: buildx uses: docker/setup-buildx-action@v1 - name: Cache Docker layers - uses: actions/cache@v2 + uses: actions/cache@v3 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildxarch-${{ github.sha }} From 8a2699da44afaf23daa0a582275ebe343526baca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 09:59:11 +0200 Subject: [PATCH 026/262] Bump github.com/aws/aws-sdk-go from 1.43.18 to 1.43.22 (#1121) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.18 to 1.43.22. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.18...v1.43.22) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b5d47d819..5c2c82446 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.18 + github.com/aws/aws-sdk-go v1.43.22 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index de887562d..1b00f05b1 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.18 h1:nwLaIz2m1f7YBEMNyEc6bBB276AIEaGaIQrc2G9h4zY= -github.com/aws/aws-sdk-go v1.43.18/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.22 h1:QY9/1TZB73UDEVQ68sUVJXf/7QUiHZl7zbbLF1wpqlc= +github.com/aws/aws-sdk-go v1.43.22/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 08e3b371da78725b409bd191a1508e04592c3fdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 10:08:03 +0200 Subject: [PATCH 027/262] Bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#1122) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.0 to 1.7.1. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.0...v1.7.1) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 5c2c82446..32c27c667 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.10.0 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.1 gorm.io/gorm v1.23.2 diff --git a/go.sum b/go.sum index 1b00f05b1..5f574e5eb 100644 --- a/go.sum +++ b/go.sum @@ -486,8 +486,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= From 9d01d2a3937065a3f148e55d9143d1b5aad40b6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 10:22:44 +0200 Subject: [PATCH 028/262] Bump gorm.io/gorm from 1.23.2 to 1.23.3 (#1123) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.2 to 1.23.3. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.2...v1.23.3) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 32c27c667..b812db522 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.1 - gorm.io/gorm v1.23.2 + gorm.io/gorm v1.23.3 k8s.io/client-go v0.23.4 ) diff --git a/go.sum b/go.sum index 5f574e5eb..aa3bbad24 100644 --- a/go.sum +++ b/go.sum @@ -988,8 +988,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gorm.io/driver/postgres v1.3.1 h1:Pyv+gg1Gq1IgsLYytj/S2k7ebII3CzEdpqQkPOdH24g= gorm.io/driver/postgres v1.3.1/go.mod h1:WwvWOuR9unCLpGWCL6Y3JOeBWvbKi6JLhayiVclSZZU= gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.2 h1:xmq9QRMWL8HTJyhAUBXy8FqIIQCYESeKfJL4DoGKiWQ= -gorm.io/gorm v1.23.2/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.3 h1:jYh3nm7uLZkrMVfA8WVNjDZryKfr7W+HTlInVgKFJAg= +gorm.io/gorm v1.23.3/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 85020ff4746d8e4e425024faec017c699ad12e0c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 23 Mar 2022 10:32:40 +0200 Subject: [PATCH 029/262] Bump k8s.io/client-go from 0.23.4 to 0.23.5 (#1124) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.4 to 0.23.5. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.4...v0.23.5) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index b812db522..88b5669e4 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.1 gorm.io/gorm v1.23.3 - k8s.io/client-go v0.23.4 + k8s.io/client-go v0.23.5 ) diff --git a/go.sum b/go.sum index aa3bbad24..71260f634 100644 --- a/go.sum +++ b/go.sum @@ -997,10 +997,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= -k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/client-go v0.23.4 h1:YVWvPeerA2gpUudLelvsolzH7c2sFoXXR5wM/sWqNFU= -k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= +k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= +k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8= +k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From be157a8a5ad9f931a107ab39db2ab30b7f4cbd00 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 27 Mar 2022 14:36:24 +0800 Subject: [PATCH 030/262] ASFF: no longer include timestamp in the finding ID (#1127) --- check/controls.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/check/controls.go b/check/controls.go index f84acf7b6..78261ba02 100644 --- a/check/controls.go +++ b/check/controls.go @@ -249,7 +249,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { AwsAccountId: aws.String(a), Confidence: aws.Int64(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), - Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s%s", arn, a, check.ID, tf)), + Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s", arn, a, check.ID)), CreatedAt: aws.String(tf), Description: aws.String(check.Text), ProductArn: aws.String(arn), From 70a544e916efacb7ecdb7f77dd3c74d17321af3e Mon Sep 17 00:00:00 2001 From: Luis Serra <74016165+luis-serra-ki@users.noreply.github.com> Date: Mon, 28 Mar 2022 15:40:04 +0100 Subject: [PATCH 031/262] chore(message): fix wrong PGSQL_DBNAME error message (#1128) --- cmd/database.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/database.go b/cmd/database.go index 6732ede64..17d202dd5 100644 --- a/cmd/database.go +++ b/cmd/database.go @@ -38,7 +38,7 @@ func getPsqlConnInfo() (PsqlConnInfo, error) { if value := viper.GetString("PGSQL_DBNAME"); value != "" { dbName = value } else { - return PsqlConnInfo{}, fmt.Errorf("%s_PGSQL_USER env var is required", envVarsPrefix) + return PsqlConnInfo{}, fmt.Errorf("%s_PGSQL_DBNAME env var is required", envVarsPrefix) } var sslMode string From 
b3902d2d145c5b67450fad575ccdf3a1ed66ca31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Mar 2022 09:32:10 +0300 Subject: [PATCH 032/262] Bump alpine from 3.15.1 to 3.15.2 (#1130) Bumps alpine from 3.15.1 to 3.15.2. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4a11ed272..4fb4893e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.15.1 AS run +FROM alpine:3.15.2 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 74d91d2941181edfb57bb801d0c6a9e772b63a84 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 29 Mar 2022 11:01:42 +0300 Subject: [PATCH 033/262] Bump github.com/aws/aws-sdk-go from 1.43.22 to 1.43.27 (#1131) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.22 to 1.43.27. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.22...v1.43.27) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 88b5669e4..ff42da9cb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.22 + github.com/aws/aws-sdk-go v1.43.27 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 71260f634..6fe6cf83d 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.22 h1:QY9/1TZB73UDEVQ68sUVJXf/7QUiHZl7zbbLF1wpqlc= -github.com/aws/aws-sdk-go v1.43.22/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.27 h1:de0u6uSrtjPtSZ7I/452GUU2iNDdbYIawR2qkYoD1z0= +github.com/aws/aws-sdk-go v1.43.27/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From faa1e88d0d4185363f0d669b5c6065b731a58044 Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 3 Apr 
2022 12:00:08 +0300 Subject: [PATCH 034/262] release: prepare v0.6.7-rc1 (#1136) Signed-off-by: chenk --- .gitignore | 1 + job.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index c63052575..36a125394 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ coverage.txt thumbs.db /kubeconfig.kube-bench /test.data +*.iml \ No newline at end of file diff --git a/job.yaml b/job.yaml index bf1adf7a5..f34824b6c 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:v0.6.6 + image: aquasec/kube-bench:v0.6.7-rc1 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 10ab72abfbcbd635c154a59d0ff44cb54a0bf9bd Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 3 Apr 2022 14:27:06 +0300 Subject: [PATCH 035/262] release: v0.6.7 (#1137) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index f34824b6c..ef609f72e 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:v0.6.7-rc1 + image: aquasec/kube-bench:v0.6.7 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From e0fe5698a0d2105676bc5d79afe1533546f2f293 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Tue, 5 Apr 2022 21:25:45 +0800 Subject: [PATCH 036/262] chore(lint): setup golangci-lint (#1144) * chore(lint): setup golangci-lint * linters: gofmt, goimports and misspell * Update build.yml Co-authored-by: Matthieu MOREL Co-authored-by: Yoav Rotem --- .github/workflows/build.yml | 5 + .golangci.yaml | 12 + check/check.go | 2 +- check/check_test.go | 1 - check/controls.go | 2 + check/test.go | 11 +- check/test_test.go | 498 +++++++++++++++++++++------------ cmd/common.go | 5 +- cmd/common_test.go | 3 - cmd/kubernetes_version_test.go | 7 +- cmd/root.go | 1 - cmd/securityHub.go | 5 +- cmd/util.go | 38 ++- cmd/util_test.go | 24 +- cmd/version.go | 1 + 15 files changed, 386 insertions(+), 229 deletions(-) create mode 100644 .golangci.yaml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c8e5f71a7..529c75920 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,6 +31,11 @@ jobs: uses: actions/checkout@v3 - name: yaml-lint uses: ibiqlik/action-yamllint@v3 + - name: Setup golangci-lint + uses: golangci/golangci-lint-action@v2 + with: + version: latest + args: --verbose unit: name: Unit tests runs-on: ubuntu-18.04 diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 000000000..f8ba09feb --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,12 @@ +--- +linters: + disable-all: true + enable: + - deadcode + - gocyclo + - gofmt + - goimports + - govet + - misspell + - typecheck + - varcheck diff --git a/check/check.go b/check/check.go index a6ebdb03b..06fac0548 100644 --- a/check/check.go +++ b/check/check.go @@ -229,7 +229,7 @@ func (c *Check) execute() (finalOutput *testOutput, err error) { // Check for AuditConfigOutput only if AuditConfig is set if !result.flagFound && c.AuditConfig != "" { - //t.isConfigSetting = true + // t.isConfigSetting = true t.auditUsed = AuditConfig result = *(t.execute(c.AuditConfigOutput)) if !result.flagFound && t.Env != "" { diff --git a/check/check_test.go b/check/check_test.go index fd18927bf..79eb0463c 100644 --- a/check/check_test.go +++ b/check/check_test.go @@ -115,7 +115,6 @@ func TestCheckAuditEnv(t *testing.T) { } func TestCheckAuditConfig(t *testing.T) { - 
passingCases := []*Check{ controls.Groups[1].Checks[0], controls.Groups[1].Checks[3], diff --git a/check/controls.go b/check/controls.go index 78261ba02..060198cef 100644 --- a/check/controls.go +++ b/check/controls.go @@ -285,6 +285,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { } return fs, nil } + func getConfig(name string) (string, error) { r := viper.GetString(name) if len(r) == 0 { @@ -292,6 +293,7 @@ func getConfig(name string) (string, error) { } return r, nil } + func summarize(controls *Controls, state State) { switch state { case PASS: diff --git a/check/test.go b/check/test.go index 212ea182e..bb366230b 100644 --- a/check/test.go +++ b/check/test.go @@ -68,9 +68,11 @@ type testItem struct { auditUsed AuditUsed } -type envTestItem testItem -type pathTestItem testItem -type flagTestItem testItem +type ( + envTestItem testItem + pathTestItem testItem + flagTestItem testItem +) type compare struct { Op string @@ -236,7 +238,7 @@ func (t testItem) evaluate(s string) *testOutput { } result.flagFound = match - var isExist = "exists" + isExist := "exists" if !result.flagFound { isExist = "does not exist" } @@ -255,7 +257,6 @@ func (t testItem) evaluate(s string) *testOutput { } func compareOp(tCompareOp string, flagVal string, tCompareValue string, flagName string) (string, bool) { - expectedResultPattern := "" testResult := false diff --git a/check/test_test.go b/check/test_test.go index 8cbad7210..29eb26922 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -46,7 +46,6 @@ func init() { } func TestTestExecute(t *testing.T) { - cases := []struct { check *Check str string @@ -305,7 +304,6 @@ func TestTestExecute(t *testing.T) { } func TestTestExecuteExceptions(t *testing.T) { - cases := []struct { *Check str string @@ -366,7 +364,8 @@ func TestTestUnmarshal(t *testing.T) { `, kubeletConfig{}, false, - }, { + }, + { ` kind: KubeletConfiguration address: 0.0.0.0 @@ -490,34 +489,42 @@ func TestAllElementsValid(t *testing.T) { }, { source: []string{}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, { source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: true, }, { source: []string{"blah"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", 
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, { source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "blah"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, } @@ -594,494 +601,628 @@ func TestCompareOp(t *testing.T) { // Test Op "eq" {label: "op=eq, both empty", op: "eq", flagVal: "", compareValue: "", expectedResultPattern: "'' is equal to ''", testResult: true, flagName: ""}, - {label: "op=eq, true==true", op: "eq", flagVal: "true", + { + label: "op=eq, true==true", op: "eq", flagVal: "true", compareValue: "true", expectedResultPattern: "'parameterTrue' is equal to 'true'", testResult: true, - flagName: "parameterTrue"}, + flagName: "parameterTrue", + }, - {label: "op=eq, false==false", op: "eq", flagVal: "false", + { + label: "op=eq, false==false", op: "eq", flagVal: "false", compareValue: "false", expectedResultPattern: "'parameterFalse' is equal to 'false'", testResult: true, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=eq, false==true", op: "eq", flagVal: "false", + { + label: "op=eq, false==true", op: "eq", flagVal: "false", compareValue: "true", expectedResultPattern: "'parameterFalse' is equal to 'true'", testResult: false, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=eq, strings match", op: "eq", flagVal: "KubeletConfiguration", + { + label: "op=eq, strings match", op: "eq", flagVal: "KubeletConfiguration", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=eq, flagVal=empty", op: "eq", flagVal: "", + { + label: "op=eq, flagVal=empty", op: "eq", flagVal: "", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=eq, compareValue=empty", + { + label: "op=eq, compareValue=empty", op: "eq", flagVal: "KubeletConfiguration", compareValue: "", expectedResultPattern: "'--FlagNameKubeletConf' is equal to ''", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, // Test Op "noteq" - {label: "op=noteq, both empty", + { + label: "op=noteq, both empty", op: "noteq", flagVal: "", compareValue: "", expectedResultPattern: "'parameter' is not equal to ''", testResult: false, - flagName: "parameter"}, + flagName: "parameter", + }, - {label: "op=noteq, true!=true", + { + label: "op=noteq, true!=true", op: "noteq", flagVal: "true", compareValue: "true", expectedResultPattern: "'parameterTrue' is not equal to 'true'", testResult: false, - flagName: "parameterTrue"}, + flagName: "parameterTrue", + }, - {label: "op=noteq, false!=false", 
+ { + label: "op=noteq, false!=false", op: "noteq", flagVal: "false", compareValue: "false", expectedResultPattern: "'parameterFalse' is not equal to 'false'", testResult: false, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=noteq, false!=true", + { + label: "op=noteq, false!=true", op: "noteq", flagVal: "false", compareValue: "true", expectedResultPattern: "'parameterFalse' is not equal to 'true'", testResult: true, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=noteq, strings match", + { + label: "op=noteq, strings match", op: "noteq", flagVal: "KubeletConfiguration", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=noteq, flagVal=empty", + { + label: "op=noteq, flagVal=empty", op: "noteq", flagVal: "", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=noteq, compareValue=empty", + { + label: "op=noteq, compareValue=empty", op: "noteq", flagVal: "KubeletConfiguration", compareValue: "", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to ''", testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, // Test Op "gt" - {label: "op=gt, both empty", + { + label: "op=gt, both empty", op: "gt", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 0 > 0", + flagName: "flagName", + }, + { + label: "op=gt, 0 > 0", op: "gt", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is greater than 0", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 4 > 5", + flagName: "flagName", + }, + { + label: "op=gt, 4 > 5", op: "gt", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is greater than 5", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 5 > 4", + flagName: "flagName", + }, + { + label: "op=gt, 5 > 4", op: "gt", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is greater than 4", testResult: true, - flagName: "flagName"}, - {label: "op=gt, 5 > 5", + flagName: "flagName", + }, + { + label: "op=gt, 5 > 5", op: "gt", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is greater than 5", testResult: false, - flagName: "flagName"}, - {label: "op=gt, Pikachu > 5", + flagName: "flagName", + }, + { + label: "op=gt, Pikachu > 5", op: "gt", flagVal: "Pikachu", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Pikachu' '5'", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 5 > Bulbasaur", + flagName: "flagName", + }, + { + label: "op=gt, 5 > Bulbasaur", op: "gt", flagVal: "5", compareValue: "Bulbasaur", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Bulbasaur'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "lt" - {label: "op=lt, both empty", + { + label: "op=lt, both empty", op: "lt", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 0 < 0", + flagName: "flagName", + }, + { + label: "op=lt, 0 < 0", op: 
"lt", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is lower than 0", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 4 < 5", + flagName: "flagName", + }, + { + label: "op=lt, 4 < 5", op: "lt", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is lower than 5", testResult: true, - flagName: "flagName"}, - {label: "op=lt, 5 < 4", + flagName: "flagName", + }, + { + label: "op=lt, 5 < 4", op: "lt", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is lower than 4", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 5 < 5", + flagName: "flagName", + }, + { + label: "op=lt, 5 < 5", op: "lt", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is lower than 5", testResult: false, - flagName: "flagName"}, - {label: "op=lt, Charmander < 5", + flagName: "flagName", + }, + { + label: "op=lt, Charmander < 5", op: "lt", flagVal: "Charmander", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Charmander' '5'", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 5 < Charmeleon", + flagName: "flagName", + }, + { + label: "op=lt, 5 < Charmeleon", op: "lt", flagVal: "5", compareValue: "Charmeleon", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Charmeleon'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "gte" - {label: "op=gte, both empty", + { + label: "op=gte, both empty", op: "gte", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 0 >= 0", + flagName: "flagName", + }, + { + label: "op=gte, 0 >= 0", op: "gte", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is greater or equal to 0", testResult: true, - flagName: "flagName"}, - {label: "op=gte, 4 >= 5", + flagName: "flagName", + }, + { + label: "op=gte, 4 >= 5", op: "gte", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is greater or equal to 5", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 5 >= 4", + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 4", op: "gte", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is greater or equal to 4", testResult: true, - flagName: "flagName"}, - {label: "op=gte, 5 >= 5", + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 5", op: "gte", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is greater or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=gte, Ekans >= 5", + flagName: "flagName", + }, + { + label: "op=gte, Ekans >= 5", op: "gte", flagVal: "Ekans", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Ekans' '5'", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 4 >= Zubat", + flagName: "flagName", + }, + { + label: "op=gte, 4 >= Zubat", op: "gte", flagVal: "4", compareValue: "Zubat", expectedResultPattern: "Invalid Number(s) used for comparison: '4' 'Zubat'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "lte" - {label: "op=lte, both empty", + { + label: "op=lte, both empty", op: "lte", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 0 <= 0", + flagName: "flagName", + }, + { + label: "op=lte, 0 <= 0", op: "lte", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' 
is lower or equal to 0", testResult: true, - flagName: "flagName"}, - {label: "op=lte, 4 <= 5", + flagName: "flagName", + }, + { + label: "op=lte, 4 <= 5", op: "lte", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is lower or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=lte, 5 <= 4", + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 4", op: "lte", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is lower or equal to 4", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 5 <= 5", + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 5", op: "lte", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is lower or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=lte, Venomoth <= 4", + flagName: "flagName", + }, + { + label: "op=lte, Venomoth <= 4", op: "lte", flagVal: "Venomoth", compareValue: "4", expectedResultPattern: "Invalid Number(s) used for comparison: 'Venomoth' '4'", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 5 <= Meowth", + flagName: "flagName", + }, + { + label: "op=lte, 5 <= Meowth", op: "lte", flagVal: "5", compareValue: "Meowth", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Meowth'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "has" - {label: "op=has, both empty", + { + label: "op=has, both empty", op: "has", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' has ''", testResult: true, - flagName: "flagName"}, - {label: "op=has, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=has, flagVal=empty", op: "has", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' has 'blah'", testResult: false, - flagName: "flagName"}, - {label: "op=has, compareValue=empty", + flagName: "flagName", + }, + { + label: "op=has, compareValue=empty", op: "has", flagVal: "blah", compareValue: "", expectedResultPattern: "'flagName-blah' has ''", testResult: true, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'la'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'la'", op: "has", flagVal: "blah", compareValue: "la", expectedResultPattern: "'flagName-blah' has 'la'", testResult: true, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'LA'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'LA'", op: "has", flagVal: "blah", compareValue: "LA", expectedResultPattern: "'flagName-blah' has 'LA'", testResult: false, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'lo'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'lo'", op: "has", flagVal: "blah", compareValue: "lo", expectedResultPattern: "'flagName-blah' has 'lo'", testResult: false, - flagName: "flagName-blah"}, + flagName: "flagName-blah", + }, // Test Op "nothave" - {label: "op=nothave, both empty", + { + label: "op=nothave, both empty", op: "nothave", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' does not have ''", testResult: false, - flagName: "flagName"}, - {label: "op=nothave, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=nothave, flagVal=empty", op: "nothave", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' does not have 'blah'", testResult: true, - flagName: "flagName"}, - {label: "op=nothave, compareValue=empty", + flagName: "flagName", + }, + { + label: "op=nothave, compareValue=empty", op: "nothave", flagVal: "blah", compareValue: "", 
expectedResultPattern: "'flagName-blah' does not have ''", testResult: false, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'la'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'la'", op: "nothave", flagVal: "blah", compareValue: "la", expectedResultPattern: "'flagName-blah' does not have 'la'", testResult: false, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'LA'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'LA'", op: "nothave", flagVal: "blah", compareValue: "LA", expectedResultPattern: "'flagName-blah' does not have 'LA'", testResult: true, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'lo'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'lo'", op: "nothave", flagVal: "blah", compareValue: "lo", expectedResultPattern: "'flagName-blah' does not have 'lo'", testResult: true, - flagName: "flagName-blah"}, + flagName: "flagName-blah", + }, // Test Op "regex" - {label: "op=regex, both empty", + { + label: "op=regex, both empty", op: "regex", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' matched by regex expression ''", testResult: true, - flagName: "flagName"}, - {label: "op=regex, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=regex, flagVal=empty", op: "regex", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' matched by regex expression 'blah'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "valid_elements" - {label: "op=valid_elements, valid_elements both empty", + { + label: "op=valid_elements, valid_elements both empty", op: "valid_elements", flagVal: "", compareValue: "", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", testResult: true, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, - {label: "op=valid_elements, valid_elements flagVal empty", + { + label: "op=valid_elements, valid_elements flagVal empty", op: "valid_elements", flagVal: "", compareValue: "a,b", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b'", testResult: false, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, - {label: "op=valid_elements, valid_elements compareValue empty", + { + label: "op=valid_elements, valid_elements compareValue empty", op: "valid_elements", flagVal: "a,b", compareValue: "", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", testResult: false, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements two list equals", + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements two list equals", op: "valid_elements", flagVal: "a,b,c", compareValue: "a,b,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", testResult: true, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements partial flagVal valid", + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements partial flagVal valid", op: "valid_elements", flagVal: "a,c", compareValue: "a,b,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", testResult: true, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements partial compareValue valid", + flagName: "flagWithMultipleElements", + 
}, + { + label: "op=valid_elements, valid_elements partial compareValue valid", op: "valid_elements", flagVal: "a,b,c", compareValue: "a,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,c'", testResult: false, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, // Test Op "bitmask" - {label: "op=bitmask, 644 AND 640", + { + label: "op=bitmask, 644 AND 640", op: "bitmask", flagVal: "640", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission640 has permissions 640, expected 644 or more restrictive", testResult: true, - flagName: "etc/fileExamplePermission640"}, - {label: "op=bitmask, 644 AND 777", + flagName: "etc/fileExamplePermission640", + }, + { + label: "op=bitmask, 644 AND 777", op: "bitmask", flagVal: "777", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission777 has permissions 777, expected 644 or more restrictive", testResult: false, - flagName: "etc/fileExamplePermission777"}, - {label: "op=bitmask, 644 AND 444", + flagName: "etc/fileExamplePermission777", + }, + { + label: "op=bitmask, 644 AND 444", op: "bitmask", flagVal: "444", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission444 has permissions 444, expected 644 or more restrictive", testResult: true, - flagName: "etc/fileExamplePermission444"}, - {label: "op=bitmask, 644 AND 211", + flagName: "etc/fileExamplePermission444", + }, + { + label: "op=bitmask, 644 AND 211", op: "bitmask", flagVal: "211", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission211 has permissions 211, expected 644 or more restrictive", testResult: false, - flagName: "etc/fileExamplePermission211"}, - {label: "op=bitmask, Harry AND 211", + flagName: "etc/fileExamplePermission211", + }, + { + label: "op=bitmask, Harry AND 211", op: "bitmask", flagVal: "Harry", compareValue: "644", expectedResultPattern: "Not numeric value - flag: Harry", testResult: false, - flagName: "etc/fileExample"}, - {label: "op=bitmask, 644 AND Potter", + flagName: "etc/fileExample", + }, + { + label: "op=bitmask, 644 AND Potter", op: "bitmask", flagVal: "211", compareValue: "Potter", expectedResultPattern: "Not numeric value - flag: Potter", testResult: false, - flagName: "etc/fileExample"}, + flagName: "etc/fileExample", + }, } for _, c := range cases { @@ -1136,37 +1277,36 @@ func TestToNumeric(t *testing.T) { } func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { - type Resources struct { - Resources []string `json:"resources"` - Providers []map[string]interface{} `json:"providers"` + Resources []string `json:"resources"` + Providers []map[string]interface{} `json:"providers"` } type EncryptionConfig struct { - Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` - Resources []Resources `json:"resources"` + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Resources []Resources `json:"resources"` } type Key struct { - Secret string `json:"secret"` - Name string `json:"name"` + Secret string `json:"secret"` + Name string `json:"name"` } type Aescbc struct { - Keys []Key `json:"keys"` + Keys []Key `json:"keys"` } type SecretBox struct { - Keys []Key `json:"keys"` + Keys []Key `json:"keys"` } - type Aesgcm struct { - Keys []Key `json:"keys"` + type Aesgcm struct { + Keys []Key `json:"keys"` } // identity disable encryption when set as the first parameter - type Identity struct {} + type Identity struct{} cases := []struct { name string @@ -1179,11 +1319,12 @@ func 
TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1191,11 +1332,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].name}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "name1", false, }, @@ -1203,11 +1345,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results don't match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aesgcm": Aesgcm{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", true, }, @@ -1215,11 +1358,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aesgcm.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aesgcm": Aesgcm{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1227,11 +1371,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].secretbox.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"secretbox": SecretBox{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"secretbox": SecretBox{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1239,11 +1384,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}, Key{Secret: "secret2", Name: "name2"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}, {Secret: "secret2", Name: "name2"}}}}, + }}}, + }, "secret1 secret2", false, }, diff --git a/cmd/common.go b/cmd/common.go index 6fb72ae1a..20e258a0b 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -47,7 +47,7 @@ func NewRunFilter(opts FilterOpts) (check.Predicate, error) { } 
return func(g *check.Group, c *check.Check) bool { - var test = true + test := true if len(groupIDs) > 0 { _, ok := groupIDs[g.ID] test = test && ok @@ -87,7 +87,6 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { // Get the set of executables we need for this section of the tests binmap, err := getBinaries(typeConf, nodetype) - // Checks that the executables we need for the section are running. if err != nil { glog.V(1).Info(fmt.Sprintf("failed to get a set of executables needed for tests: %v", err)) @@ -148,7 +147,7 @@ func generateDefaultEnvAudit(controls *check.Controls, binSubs []string) { } func parseSkipIds(skipIds string) map[string]bool { - var skipIdMap = make(map[string]bool, 0) + skipIdMap := make(map[string]bool, 0) if skipIds != "" { for _, id := range strings.Split(skipIds, ",") { skipIdMap[strings.Trim(id, " ")] = true diff --git a/cmd/common_test.go b/cmd/common_test.go index 04c8cb35e..c9497754b 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -52,7 +52,6 @@ func TestParseSkipIds(t *testing.T) { } func TestNewRunFilter(t *testing.T) { - type TestCase struct { Name string FilterOpts FilterOpts @@ -139,7 +138,6 @@ func TestNewRunFilter(t *testing.T) { // then assert.EqualError(t, err, "group option and check option can't be used together") }) - } func TestIsMaster(t *testing.T) { @@ -212,7 +210,6 @@ func TestIsMaster(t *testing.T) { } func TestMapToCISVersion(t *testing.T) { - viperWithData, err := loadConfigForTest() if err != nil { t.Fatalf("Unable to load config file %v", err) diff --git a/cmd/kubernetes_version_test.go b/cmd/kubernetes_version_test.go index b06f79f49..6ae045316 100644 --- a/cmd/kubernetes_version_test.go +++ b/cmd/kubernetes_version_test.go @@ -72,7 +72,6 @@ FAjB57z2NcIgJuVpQnGRYtr/JcH2Qdsq8bLtXaojUIWOOqoTDRLYozdMOOQ= t.Errorf("Expected error") } } - }) } } @@ -124,8 +123,8 @@ func TestGetWebData(t *testing.T) { } }) } - } + func TestGetWebDataWithRetry(t *testing.T) { okfn := func(w http.ResponseWriter, r *http.Request) { _, _ = fmt.Fprintln(w, `{ @@ -173,8 +172,8 @@ func TestGetWebDataWithRetry(t *testing.T) { } }) } - } + func TestExtractVersion(t *testing.T) { okJSON := []byte(`{ "major": "1", @@ -231,7 +230,6 @@ func TestExtractVersion(t *testing.T) { } func TestGetKubernetesURL(t *testing.T) { - resetEnvs := func() { os.Unsetenv("KUBE_BENCH_K8S_ENV") os.Unsetenv("KUBERNETES_SERVICE_HOST") @@ -277,5 +275,4 @@ func TestGetKubernetesURL(t *testing.T) { } }) } - } diff --git a/cmd/root.go b/cmd/root.go index 7d2e65a8b..c674aeb1a 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -200,7 +200,6 @@ func init() { goflag.CommandLine.VisitAll(func(goflag *goflag.Flag) { RootCmd.PersistentFlags().AddGoFlag(goflag) }) - } // initConfig reads in config file and ENV variables if set. diff --git a/cmd/securityHub.go b/cmd/securityHub.go index ae6fd817a..56d6811f9 100644 --- a/cmd/securityHub.go +++ b/cmd/securityHub.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/viper" ) -//REGION ... +// REGION ... 
const REGION = "AWS_REGION" func writeFinding(in []*securityhub.AwsSecurityFinding) error { @@ -20,7 +20,8 @@ func writeFinding(in []*securityhub.AwsSecurityFinding) error { return fmt.Errorf("%s not set", REGION) } sess, err := session.NewSession(&aws.Config{ - Region: aws.String(r)}, + Region: aws.String(r), + }, ) if err != nil { return err diff --git a/cmd/util.go b/cmd/util.go index 365d5ef9f..12546f538 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -16,26 +16,26 @@ import ( "github.com/spf13/viper" ) +// Print colors +var colors = map[check.State]*color.Color{ + check.PASS: color.New(color.FgGreen), + check.FAIL: color.New(color.FgRed), + check.WARN: color.New(color.FgYellow), + check.INFO: color.New(color.FgBlue), +} + var ( - // Print colors - colors = map[check.State]*color.Color{ - check.PASS: color.New(color.FgGreen), - check.FAIL: color.New(color.FgRed), - check.WARN: color.New(color.FgYellow), - check.INFO: color.New(color.FgBlue), + psFunc func(string) string + statFunc func(string) (os.FileInfo, error) + getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error) + TypeMap = map[string][]string{ + "ca": {"cafile", "defaultcafile"}, + "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, + "service": {"svc", "defaultsvc"}, + "config": {"confs", "defaultconf"}, } ) -var psFunc func(string) string -var statFunc func(string) (os.FileInfo, error) -var getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error) -var TypeMap = map[string][]string{ - "ca": {"cafile", "defaultcafile"}, - "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, - "service": {"svc", "defaultsvc"}, - "config": {"confs", "defaultconf"}, -} - func init() { psFunc = ps statFunc = os.Stat @@ -208,7 +208,6 @@ func getFiles(v *viper.Viper, fileType string) map[string]string { // verifyBin checks that the binary specified is running func verifyBin(bin string) bool { - // Strip any quotes bin = strings.Trim(bin, "'\"") @@ -290,7 +289,6 @@ Alternatively, you can specify the version with --version ` func getKubeVersion() (*KubeVersion, error) { - if k8sVer, err := getKubeVersionFromRESTAPI(); err == nil { glog.V(2).Info(fmt.Sprintf("Kubernetes REST API Reported version: %s", k8sVer)) return k8sVer, nil @@ -298,7 +296,6 @@ func getKubeVersion() (*KubeVersion, error) { // These executables might not be on the user's path. 
_, err := exec.LookPath("kubectl") - if err != nil { glog.V(3).Infof("Error locating kubectl: %s", err) _, err = exec.LookPath("kubelet") @@ -337,7 +334,6 @@ func getKubeVersionFromKubectl() *KubeVersion { func getKubeVersionFromKubelet() *KubeVersion { cmd := exec.Command("kubelet", "--version") out, err := cmd.CombinedOutput() - if err != nil { glog.V(2).Infof("Failed to query kubelet: %s", err) glog.V(2).Info(err) @@ -401,11 +397,9 @@ func makeSubstitutions(s string, ext string, m map[string]string) (string, []str func isEmpty(str string) bool { return strings.TrimSpace(str) == "" - } func buildComponentMissingErrorMessage(nodetype check.NodeType, component string, bins []string) string { - errMessageTemplate := ` Unable to detect running programs for component %q The following %q programs have been searched, but none of them have been found: diff --git a/cmd/util_test.go b/cmd/util_test.go index 4d88abc87..79a505afe 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -29,9 +29,11 @@ import ( "github.com/spf13/viper" ) -var g string -var e []error -var eIndex int +var ( + g string + e []error + eIndex int +) func fakeps(proc string) string { return g @@ -132,7 +134,7 @@ func TestGetBinaries(t *testing.T) { expectErr: false, }, { - // "anotherthing" in list of components but doesn't have a defintion + // "anotherthing" in list of components but doesn't have a definition config: map[string]interface{}{"components": []string{"apiserver", "anotherthing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}}}, psOut: "kube-apiserver thing", exp: map[string]string{"apiserver": "kube-apiserver"}, @@ -262,7 +264,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil}, exp: map[string]string{"apiserver": "kube-apiserver"}, }, @@ -271,7 +274,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil, nil}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "/my/file/thing"}, }, @@ -280,7 +284,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}, "defaultconf": "another/thing"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}, "defaultconf": "another/thing"}, + }, statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "another/thing"}, }, @@ -289,7 +294,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": 
map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, }, @@ -459,7 +465,6 @@ func TestGetConfigFilePath(t *testing.T) { } func TestDecrementVersion(t *testing.T) { - cases := []struct { kubeVersion string succeed bool @@ -646,7 +651,6 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { } func Test_getOcpValidVersion(t *testing.T) { - cases := []struct { openShiftVersion string succeed bool diff --git a/cmd/version.go b/cmd/version.go index beb046972..e9b2dcd39 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "github.com/spf13/cobra" ) From 436141eac9923e0fe3d52144b2af620c5526c903 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Tue, 5 Apr 2022 21:54:17 +0800 Subject: [PATCH 037/262] Remove a needless debug log (#1145) --- cmd/kubernetes_version.go | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/kubernetes_version.go b/cmd/kubernetes_version.go index 3f3f7e96f..da7769ca6 100644 --- a/cmd/kubernetes_version.go +++ b/cmd/kubernetes_version.go @@ -128,7 +128,6 @@ func getWebData(srvURL, token string, cacert *tls.Certificate) ([]byte, error) { } authToken := fmt.Sprintf("Bearer %s", token) - glog.V(2).Info(fmt.Sprintf("getWebData AUTH TOKEN --[%q]--\n", authToken)) req.Header.Set("Authorization", authToken) resp, err := client.Do(req) From f445e3367123ad9c53e3144cf9502e653a085b0b Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Wed, 6 Apr 2022 13:12:04 +0800 Subject: [PATCH 038/262] Upgrade goreleaser to v1.7.0 (#1143) --- .github/workflows/build.yml | 5 ++++- .github/workflows/release.yml | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 529c75920..957a10045 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -92,8 +92,11 @@ jobs: go-version: 1.16 - name: Checkout code uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Dry-run release snapshot uses: goreleaser/goreleaser-action@v2 with: - version: v0.169.0 + distribution: goreleaser + version: v1.7.0 args: release --snapshot --skip-publish --rm-dist diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c88fa8d27..b935b42ea 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,6 +20,8 @@ jobs: go-version: 1.16 - name: Checkout code uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Run unit tests run: make tests - name: Setup Kubernetes cluster (KIND) @@ -44,7 +46,8 @@ jobs: - name: Release uses: goreleaser/goreleaser-action@v2 with: - version: v0.169.0 + distribution: goreleaser + version: v1.7.0 args: release --rm-dist env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 741ce798837bab288cf1ee03f890a3c3836234db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 12:40:44 +0300 Subject: [PATCH 039/262] Bump golangci/golangci-lint-action from 2 to 3 (#1149) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 2 to 3. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v2...v3) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 957a10045..3e1f88c8a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,7 @@ jobs: - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: version: latest args: --verbose From b6a2ff8c83e72c57ccfeaa601d77aef0a8b8a0e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 13:41:03 +0300 Subject: [PATCH 040/262] Bump codecov/codecov-action from 2 to 3 (#1150) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 2 to 3. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/master/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v2...v3) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3e1f88c8a..a3a53bbed 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: - name: Run unit tests run: make tests - name: Upload code coverage - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: file: ./coverage.txt e2e: From 1a08ef2dbc447ca4cb0af8ce4fea2589717e6d3d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 14:07:32 +0300 Subject: [PATCH 041/262] Bump actions/setup-go from 2 to 3 (#1151) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2 to 3. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 8 ++++---- .github/workflows/release.yml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a3a53bbed..48dd5ebb7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: 1.16 - name: Checkout code @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: 1.16 - name: Checkout code @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: 1.16 - name: Checkout code @@ -87,7 +87,7 @@ jobs: needs: [e2e, unit] steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: 1.16 - name: Checkout code diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b935b42ea..59a8848d9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-18.04 steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v3 with: go-version: 1.16 - name: Checkout code From f28d9f804e22045b4a01eef2c42faf2023f464d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 14:40:29 +0300 Subject: [PATCH 042/262] Bump github.com/aws/aws-sdk-go from 1.43.27 to 1.43.37 (#1152) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.27 to 1.43.37. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.27...v1.43.37) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff42da9cb..f6f03c77f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.27 + github.com/aws/aws-sdk-go v1.43.37 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 6fe6cf83d..a5f13d0fe 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.27 h1:de0u6uSrtjPtSZ7I/452GUU2iNDdbYIawR2qkYoD1z0= -github.com/aws/aws-sdk-go v1.43.27/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.37 h1:kyZ7UjaPZaCik+asF33UFOOYSwr9liDRr/UM/vuw8yY= +github.com/aws/aws-sdk-go v1.43.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 3040a1f80ea4dfdf307ff4f55671730fcb0ac20b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 15:01:31 +0300 Subject: [PATCH 043/262] Bump gorm.io/driver/postgres from 1.3.1 to 1.3.4 (#1153) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.1 to 1.3.4. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.1...v1.3.4) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index f6f03c77f..f3fc25c3f 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.10.0 github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.1 + gorm.io/driver/postgres v1.3.4 gorm.io/gorm v1.23.3 k8s.io/client-go v0.23.5 ) diff --git a/go.sum b/go.sum index a5f13d0fe..460c76a00 100644 --- a/go.sum +++ b/go.sum @@ -291,8 +291,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.10.1 h1:DzdIHIjG1AxGwoEEqS+mGsURyjt4enSmqzACXvVzOT8= -github.com/jackc/pgconn v1.10.1/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ= +github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -317,18 +317,18 @@ github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01C github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.9.1 h1:MJc2s0MFS8C3ok1wQTdQxWuXQcB6+HwAm5x1CzW7mf0= -github.com/jackc/pgtype v1.9.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38= +github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.14.1 h1:71oo1KAGI6mXhLiTMn6iDFcp3e7+zon/capWjl2OEFU= -github.com/jackc/pgx/v4 v4.14.1/go.mod h1:RgDuE4Z34o7XE92RpLsvFiOEfrAUT0Xt2KxvX73W06M= +github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= +github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle 
v1.2.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas= @@ -985,8 +985,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.1 h1:Pyv+gg1Gq1IgsLYytj/S2k7ebII3CzEdpqQkPOdH24g= -gorm.io/driver/postgres v1.3.1/go.mod h1:WwvWOuR9unCLpGWCL6Y3JOeBWvbKi6JLhayiVclSZZU= +gorm.io/driver/postgres v1.3.4 h1:evZ7plF+Bp+Lr1mO5NdPvd6M/N98XtwHixGB+y7fdEQ= +gorm.io/driver/postgres v1.3.4/go.mod h1:y0vEuInFKJtijuSGu9e5bs5hzzSzPK+LancpKpvbRBw= gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.3 h1:jYh3nm7uLZkrMVfA8WVNjDZryKfr7W+HTlInVgKFJAg= gorm.io/gorm v1.23.3/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= From 48b80f8f01fadae390e91d10d13431b81456ac03 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 15:12:21 +0300 Subject: [PATCH 044/262] Bump alpine from 3.15.2 to 3.15.4 (#1146) Bumps alpine from 3.15.2 to 3.15.4. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4fb4893e8..c39049d68 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.15.2 AS run +FROM alpine:3.15.4 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 521136504c77b12fd8c2f1a70133b695f857b44f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Apr 2022 17:12:13 +0300 Subject: [PATCH 045/262] Bump gorm.io/gorm from 1.23.3 to 1.23.4 (#1141) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.3 to 1.23.4. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.3...v1.23.4) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f3fc25c3f..18329bed2 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.4 - gorm.io/gorm v1.23.3 + gorm.io/gorm v1.23.4 k8s.io/client-go v0.23.5 ) diff --git a/go.sum b/go.sum index 460c76a00..58e43f85f 100644 --- a/go.sum +++ b/go.sum @@ -988,8 +988,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gorm.io/driver/postgres v1.3.4 h1:evZ7plF+Bp+Lr1mO5NdPvd6M/N98XtwHixGB+y7fdEQ= gorm.io/driver/postgres v1.3.4/go.mod h1:y0vEuInFKJtijuSGu9e5bs5hzzSzPK+LancpKpvbRBw= gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.3 h1:jYh3nm7uLZkrMVfA8WVNjDZryKfr7W+HTlInVgKFJAg= -gorm.io/gorm v1.23.3/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.4 h1:1BKWM67O6CflSLcwGQR7ccfmC4ebOxQrTfOQGRE9wjg= +gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 02fd0d4be22d88af1ceee1556b4b0aa0ba73aa11 Mon Sep 17 00:00:00 2001 From: Qiming Teng Date: Mon, 18 Apr 2022 14:27:33 +0800 Subject: [PATCH 046/262] Add support to CIS-1.23 1.0.0 (#1148) --- cfg/cis-1.23/config.yaml | 2 + cfg/cis-1.23/controlplane.yaml | 46 ++ cfg/cis-1.23/etcd.yaml | 135 +++++ cfg/cis-1.23/master.yaml | 954 +++++++++++++++++++++++++++++++++ cfg/cis-1.23/node.yaml | 462 ++++++++++++++++ cfg/cis-1.23/policies.yaml | 269 ++++++++++ cfg/config.yaml | 9 + cmd/common_test.go | 2 + docs/architecture.md | 23 +- docs/platforms.md | 23 +- 10 files changed, 1903 insertions(+), 22 deletions(-) create mode 100644 cfg/cis-1.23/config.yaml create mode 100644 cfg/cis-1.23/controlplane.yaml create mode 100644 cfg/cis-1.23/etcd.yaml create mode 100644 cfg/cis-1.23/master.yaml create mode 100644 cfg/cis-1.23/node.yaml create mode 100644 cfg/cis-1.23/policies.yaml diff --git a/cfg/cis-1.23/config.yaml b/cfg/cis-1.23/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.23/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.23/controlplane.yaml b/cfg/cis-1.23/controlplane.yaml new file mode 100644 index 000000000..1036d1e96 --- /dev/null +++ b/cfg/cis-1.23/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. 
+ scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.23/etcd.yaml b/cfg/cis-1.23/etcd.yaml new file mode 100644 index 000000000..92b72a6fa --- /dev/null +++ b/cfg/cis-1.23/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.23" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. 
+ --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.23/master.yaml b/cfg/cis-1.23/master.yaml new file mode 100644 index 000000000..c77c9a1f8 --- /dev/null +++ b/cfg/cis-1.23/master.yaml @@ -0,0 +1,954 @@ +--- +controls: +version: "cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. 
+ scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. 
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml
new file mode 100644
index 000000000..bfec111ca
--- /dev/null
+++ b/cfg/cis-1.23/node.yaml
@@ -0,0 +1,462 @@
+---
+controls:
+version: "cis-1.23"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 644 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 644
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root
+        scored: false
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: true
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          `--anonymous-auth=false`
+          Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/cis-1.23/policies.yaml b/cfg/cis-1.23/policies.yaml new file mode 100644 index 000000000..92216e4a6 --- /dev/null +++ b/cfg/cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. 
+ Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 6f7cf0fa8..fcb8bd84c 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -246,6 +246,9 @@ version_mapping: "1.18": "cis-1.6" "1.19": "cis-1.20" "1.20": "cis-1.20" + "1.21": "cis-1.20" + "1.22": "cis-1.23" + "1.23": "cis-1.23" "eks-1.0.1": "eks-1.0.1" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" @@ -274,6 +277,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.23": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index c9497754b..5f22897a4 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -237,6 +237,8 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.19", succeed: true, exp: "cis-1.20"}, {kubeVersion: "1.20", succeed: true, exp: "cis-1.20"}, {kubeVersion: "1.21", succeed: true, exp: "cis-1.20"}, + {kubeVersion: "1.22", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index b2a48f485..18370634e 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -14,16 +14,17 @@ Check the contents of the benchmark directory under `cfg` to see which targets a The following table shows the valid targets based on the CIS Benchmark version. 
| CIS Benchmark | Targets | -|---|---| -| cis-1.5| master, controlplane, node, etcd, policies | -| cis-1.6| master, controlplane, node, etcd, policies | -|cis-1.20| master, controlplane, node, etcd, policies | -| gke-1.0| master, controlplane, node, etcd, policies, managedservices | -| gke-1.2.0| controlplane, node, policies, managedservices | -| eks-1.0.1| controlplane, node, policies, managedservices | -| ack-1.0| master, controlplane, node, etcd, policies, managedservices | -| aks-1.0| controlplane, node, policies, managedservices | -| rh-0.7| master,node| -| rh-1.0| master, controlplane, node, etcd, policies | +|---------------|---------| +| cis-1.5 | master, controlplane, node, etcd, policies | +| cis-1.6 | master, controlplane, node, etcd, policies | +| cis-1.20 | master, controlplane, node, etcd, policies | +| cis-1.23 | master, controlplane, node, etcd, policies | +| gke-1.0 | master, controlplane, node, etcd, policies, managedservices | +| gke-1.2.0 | controlplane, node, policies, managedservices | +| eks-1.0.1 | controlplane, node, policies, managedservices | +| ack-1.0 | master, controlplane, node, etcd, policies, managedservices | +| aks-1.0 | controlplane, node, policies, managedservices | +| rh-0.7 | master,node| +| rh-1.0 | master, controlplane, node, etcd, policies | diff --git a/docs/platforms.md b/docs/platforms.md index cf0477471..0883eed71 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -6,14 +6,15 @@ Most of our supported benchmarks are defined in the [CIS Kubernetes Benchmarks]( Some defined by other hardenening guides. | Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | -|---|---|---|---| -| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | -| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | -| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.20 | -| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | -| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | -| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | -| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | -| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | -| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | -| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +|------|----------------------------------------------------------------|-----------|-------------| +| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | +| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | +| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | +| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | +| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | +| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | +| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | +| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | +| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | +| CIS | [OCP4 
1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | From aea61c93c707894c18a104c79b25e1eb1b5d913a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Apr 2022 08:03:43 +0300 Subject: [PATCH 047/262] Bump golang from 1.18.0 to 1.18.1 (#1155) Bumps golang from 1.18.0 to 1.18.1. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index c39049d68..5cd8f7115 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18.0 AS build +FROM golang:1.18.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 3089d6e1b59052466a1f35cb46e9fd5931d990fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Apr 2022 08:11:59 +0300 Subject: [PATCH 048/262] Bump github.com/aws/aws-sdk-go from 1.43.37 to 1.43.41 (#1156) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.37 to 1.43.41. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.37...v1.43.41) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 18329bed2..2eb7f5bb5 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.37 + github.com/aws/aws-sdk-go v1.43.41 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 58e43f85f..79ebccaae 100644 --- a/go.sum +++ b/go.sum @@ -73,8 +73,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.37 h1:kyZ7UjaPZaCik+asF33UFOOYSwr9liDRr/UM/vuw8yY= -github.com/aws/aws-sdk-go v1.43.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.43.41 h1:HaazVplP8/t6SOfybQlNUmjAxLWDKdLdX8BSEHFlJdY= +github.com/aws/aws-sdk-go v1.43.41/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From b8edf791f9e831084e1b9a49ea07da5f38f6b565 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> 
Date: Tue, 19 Apr 2022 18:16:20 +0300 Subject: [PATCH 049/262] Bump github.com/spf13/viper from 1.10.0 to 1.11.0 (#1157) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.10.0 to 1.11.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.10.0...v1.11.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 115 ++++++++++++++++++++++++++++++++++++--------------------- 2 files changed, 73 insertions(+), 44 deletions(-) diff --git a/go.mod b/go.mod index 2eb7f5bb5..d6fbc4e9f 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.10.0 + github.com/spf13/viper v1.11.0 github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.4 diff --git a/go.sum b/go.sum index 79ebccaae..9d2c0210a 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,6 +16,7 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -25,14 +27,17 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= @@ -45,6 +50,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= @@ -80,10 +86,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -97,9 +101,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= 
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -123,9 +125,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -210,6 +210,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -225,6 +226,7 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -236,20 +238,23 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod 
h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -267,14 +272,11 @@ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -362,8 +364,6 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2 
h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -385,7 +385,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= @@ -423,12 +422,14 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= @@ -452,7 +453,7 @@ github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OK github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod 
h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -463,9 +464,8 @@ github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMB github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= @@ -474,8 +474,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk= -github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= +github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -498,9 +498,9 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -524,7 +524,6 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod 
h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -535,11 +534,14 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -575,11 +577,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -614,16 +614,20 @@ golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -641,6 +645,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -656,7 +663,6 @@ golang.org/x/sys 
v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -704,6 +710,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -711,6 +718,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -718,16 +726,20 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -801,6 +813,7 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -812,8 +825,9 @@ golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -845,7 +859,11 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv 
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -889,7 +907,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -913,10 +933,17 @@ google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -943,7 +970,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -958,6 +986,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -968,8 +997,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From fbc89e1275b850cda0e902dcb2befecee04d5dfb Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 24 Apr 2022 15:12:40 +0500 Subject: [PATCH 050/262] release: prepare v0.6.8-rc1 (#1159) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index ef609f72e..7c4916226 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:v0.6.7 + image: aquasec/kube-bench:v0.6.8-rc1 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From d190cbcaf66d2681c8782081f51c3e9afaef5eb4 Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 24 Apr 2022 15:47:41 +0500 Subject: [PATCH 051/262] release: prepare v0.6.8 (#1160) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 7c4916226..201420e31 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:v0.6.8-rc1 + image: aquasec/kube-bench:v0.6.8 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From d5018f280b4c20773cdebaca4ff08268e691b9db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Apr 2022 16:45:21 +0300 Subject: [PATCH 052/262] Bump github.com/aws/aws-sdk-go from 1.43.41 to 1.44.0 (#1163) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.41 to 1.44.0. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.41...v1.44.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d6fbc4e9f..f6d79ddf8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.43.41 + github.com/aws/aws-sdk-go v1.44.0 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 9d2c0210a..2192f09fc 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.43.41 h1:HaazVplP8/t6SOfybQlNUmjAxLWDKdLdX8BSEHFlJdY= -github.com/aws/aws-sdk-go v1.43.41/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.0 h1:jwtHuNqfnJxL4DKHBUVUmQlfueQqBW7oXP6yebZR/R0= +github.com/aws/aws-sdk-go v1.44.0/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 149c5268e8d2d0d8b4c1abc710dce54695ada1dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Apr 2022 09:04:58 +0300 Subject: [PATCH 053/262] Bump gorm.io/driver/postgres from 1.3.4 to 1.3.5 (#1164) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.4 to 1.3.5. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.4...v1.3.5) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index f6d79ddf8..3dc734c64 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.11.0 github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.4 + gorm.io/driver/postgres v1.3.5 gorm.io/gorm v1.23.4 k8s.io/client-go v0.23.5 ) diff --git a/go.sum b/go.sum index 2192f09fc..393dcc77e 100644 --- a/go.sum +++ b/go.sum @@ -293,8 +293,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.11.0 h1:HiHArx4yFbwl91X3qqIHtUFoiIfLNJXCQRsnzkiwwaQ= -github.com/jackc/pgconn v1.11.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.12.0 h1:/RvQ24k3TnNdfBSW0ou9EOi5jx2cX7zfE8n2nLKuiP0= +github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -311,22 +311,22 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.2.0 h1:r7JypeP2D3onoQTCxWdTpCtJ4D+qpKr0TxvoyMhZ5ns= -github.com/jackc/pgproto3/v2 v2.2.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= +github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.10.0 h1:ILnBWrRMSXGczYvmkYD6PsYyVFUNLTnIUJHHDLmqk38= -github.com/jackc/pgtype v1.10.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= +github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= 
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.15.0 h1:B7dTkXsdILD3MF987WGGCcg+tvLW6bZJdEcqVFeU//w= -github.com/jackc/pgx/v4 v4.15.0/go.mod h1:D/zyOyXiaM1TmVWnOM18p0xdDtdakRBa0RsVGI3U3bw= +github.com/jackc/pgx/v4 v4.16.0 h1:4k1tROTJctHotannFYzu77dY3bgtMRymQP7tXQjqpPk= +github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -1014,9 +1014,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.4 h1:evZ7plF+Bp+Lr1mO5NdPvd6M/N98XtwHixGB+y7fdEQ= -gorm.io/driver/postgres v1.3.4/go.mod h1:y0vEuInFKJtijuSGu9e5bs5hzzSzPK+LancpKpvbRBw= -gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/driver/postgres v1.3.5 h1:oVLmefGqBTlgeEVG6LKnH6krOlo4TZ3Q/jIK21KUMlw= +gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU= gorm.io/gorm v1.23.4 h1:1BKWM67O6CflSLcwGQR7ccfmC4ebOxQrTfOQGRE9wjg= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From f4c372fe423c7a707914dcbed0741c6fe74fb417 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 27 Apr 2022 11:23:53 +0300 Subject: [PATCH 054/262] Bump k8s.io/client-go from 0.23.5 to 0.23.6 (#1165) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.5 to 0.23.6. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.5...v0.23.6) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 3dc734c64..e926359a7 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.5 gorm.io/gorm v1.23.4 - k8s.io/client-go v0.23.5 + k8s.io/client-go v0.23.6 ) diff --git a/go.sum b/go.sum index 393dcc77e..20a98950a 100644 --- a/go.sum +++ b/go.sum @@ -1025,10 +1025,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8= -k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8= -k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4= +k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= +k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/client-go v0.23.6 h1:7h4SctDVQAQbkHQnR4Kzi7EyUyvla5G1pFWf4+Od7hQ= +k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From 2496e391f5b6a8687b0a78692b98f96825098515 Mon Sep 17 00:00:00 2001 From: prakhar-aqua <99863600+prakhar-aqua@users.noreply.github.com> Date: Fri, 29 Apr 2022 20:25:33 +0530 Subject: [PATCH 055/262] Dockerfile with ubi8-minimal as base image (#1162) * Dockerfile with ubi8-minimal as base image * install jq , procps and override Path env * auto confirm for epel-release * Bump github.com/aws/aws-sdk-go from 1.43.41 to 1.44.0 (#1163) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.43.41 to 1.44.0. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.43.41...v1.44.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * auto conf for glibc,procps, install findutils & openssl * Bump gorm.io/driver/postgres from 1.3.4 to 1.3.5 (#1164) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.4 to 1.3.5. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.4...v1.3.5) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Bump k8s.io/client-go from 0.23.5 to 0.23.6 (#1165) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.5 to 0.23.6. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.5...v0.23.6) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Dockerfile with ubi8-minimal as base image * install jq , procps and override Path env * auto confirm for epel-release * auto conf for glibc,procps, install findutils & openssl Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.ubi8 | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 Dockerfile.ubi8 diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 new file mode 100644 index 000000000..c3f6b1fe1 --- /dev/null +++ b/Dockerfile.ubi8 @@ -0,0 +1,48 @@ +FROM golang:1.18.1 AS build +WORKDIR /go/src/github.com/aquasecurity/kube-bench/ +COPY makefile makefile +COPY go.mod go.sum ./ +COPY main.go . +COPY check/ check/ +COPY cmd/ cmd/ +COPY internal/ internal/ +ARG KUBEBENCH_VERSION +RUN make build && cp kube-bench /go/bin/kube-bench + + +# ubi8-minimal base image for build with ubi standards +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5 as run + +RUN microdnf install yum findutils openssl\ + && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ + && yum update -y \ + && yum install -y glibc \ + && yum update -y glibc \ + && yum install -y procps \ + && yum update -y procps \ + && yum install jq -y \ + && yum clean all \ + && microdnf remove yum || rpm -e -v yum \ + && microdnf clean all + +WORKDIR /opt/kube-bench/ + +ENV PATH=$PATH:/usr/local/mount-from-host/bin + +COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench +COPY entrypoint.sh . +COPY cfg/ cfg/ +ENTRYPOINT ["./entrypoint.sh"] +CMD ["install"] + + +# Build-time metadata as defined at http://label-schema.org +ARG BUILD_DATE +ARG VCS_REF +LABEL org.label-schema.build-date=$BUILD_DATE \ + org.label-schema.name="kube-bench" \ + org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \ + org.label-schema.url="https://github.com/aquasecurity/kube-bench" \ + org.label-schema.vcs-ref=$VCS_REF \ + org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \ + org.label-schema.schema-version="1.0" From ca4c9c633923616e3bb745852dbf4af931a49ad9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 May 2022 08:22:16 +0300 Subject: [PATCH 056/262] Bump gorm.io/gorm from 1.23.4 to 1.23.5 (#1166) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.4 to 1.23.5. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.4...v1.23.5) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index e926359a7..8e6c5cb02 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.5 - gorm.io/gorm v1.23.4 + gorm.io/gorm v1.23.5 k8s.io/client-go v0.23.6 ) diff --git a/go.sum b/go.sum index 20a98950a..5bb9c5799 100644 --- a/go.sum +++ b/go.sum @@ -1016,8 +1016,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.5 h1:oVLmefGqBTlgeEVG6LKnH6krOlo4TZ3Q/jIK21KUMlw= gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU= -gorm.io/gorm v1.23.4 h1:1BKWM67O6CflSLcwGQR7ccfmC4ebOxQrTfOQGRE9wjg= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.5 h1:TnlF26wScKSvknUC/Rn8t0NLLM22fypYBlvj1+aH6dM= +gorm.io/gorm v1.23.5/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 26968700274837ee307e5c0bb0f5dbd614dbe7c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 May 2022 16:49:23 +0300 Subject: [PATCH 057/262] Bump github.com/aws/aws-sdk-go from 1.44.0 to 1.44.5 (#1167) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.0 to 1.44.5. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.0...v1.44.5) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8e6c5cb02..4b7a3b2f8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.0 + github.com/aws/aws-sdk-go v1.44.5 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 5bb9c5799..950b341f6 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.0 h1:jwtHuNqfnJxL4DKHBUVUmQlfueQqBW7oXP6yebZR/R0= -github.com/aws/aws-sdk-go v1.44.0/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.5 h1:T4mckpWUfplPG4GA3FDWDCM1QaCzisjGzzeCVBhHKwQ= +github.com/aws/aws-sdk-go v1.44.5/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From aa66470631a60dbf82fb017b8cab324a90daf880 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 May 2022 08:51:53 +0300 Subject: [PATCH 058/262] Bump docker/login-action from 1 to 2 (#1170) Bumps [docker/login-action](https://github.com/docker/login-action) from 1 to 2. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 0ea849a21..3a7e3b454 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -29,12 +29,12 @@ jobs: restore-keys: | ${{ runner.os }}-buildxarch- - name: Login to Docker Hub - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to ECR - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: public.ecr.aws username: ${{ secrets.ECR_ACCESS_KEY_ID }} From 72a96b1f56769487a5c687e2076f84f9cddaf906 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 May 2022 09:26:57 +0300 Subject: [PATCH 059/262] Bump docker/setup-qemu-action from 1 to 2 (#1171) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 1 to 2. 
- [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 3a7e3b454..194cb3f30 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -17,7 +17,7 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v1 From 06b7941e1315a49a6c2bcedaeadfa80e7247a475 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 00:11:41 +0300 Subject: [PATCH 060/262] Bump docker/setup-buildx-action from 1 to 2 (#1174) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 1 to 2. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 194cb3f30..1803b51e8 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -20,7 +20,7 @@ jobs: uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 - name: Cache Docker layers uses: actions/cache@v3 with: From 54b797349ba4fd8f1a0bdf261ad91834ddf83cdb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 May 2022 00:23:21 +0300 Subject: [PATCH 061/262] Bump docker/build-push-action from 2 to 3 (#1175) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 2 to 3. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 1803b51e8..268a23b44 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -49,7 +49,7 @@ jobs: - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: context: . 
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x From 1cd61c2ac5118edc451842371156d1ba69c91507 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 May 2022 15:33:29 +0200 Subject: [PATCH 062/262] Bump k8s.io/client-go from 0.23.6 to 0.24.0 (#1176) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.23.6 to 0.24.0. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.23.6...v0.24.0) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 33 ++++++++++++++++++--------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index 4b7a3b2f8..fe3881c2f 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.5 gorm.io/gorm v1.23.5 - k8s.io/client-go v0.23.6 + k8s.io/client-go v0.24.0 ) diff --git a/go.sum b/go.sum index 950b341f6..429a0ad6e 100644 --- a/go.sum +++ b/go.sum @@ -78,6 +78,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.5 h1:T4mckpWUfplPG4GA3FDWDCM1QaCzisjGzzeCVBhHKwQ= github.com/aws/aws-sdk-go v1.44.5/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= @@ -117,6 +118,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -154,7 +156,9 @@ github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= 
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -198,6 +202,7 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -240,8 +245,6 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -339,6 +342,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -368,6 +372,7 @@ github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamh github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod 
h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -401,6 +406,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= @@ -537,9 +543,9 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -622,7 +628,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -727,7 +732,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -742,7 +746,6 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -757,7 +760,7 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1026,21 +1029,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.6/go.mod h1:1kFaYxGCFHYp3qd6a85DAj/yW8aVD6XLZMqJclkoi9g= -k8s.io/apimachinery v0.23.6/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/client-go v0.23.6 h1:7h4SctDVQAQbkHQnR4Kzi7EyUyvla5G1pFWf4+Od7hQ= -k8s.io/client-go v0.23.6/go.mod h1:Umt5icFOMLV/+qbtZ3PR0D+JA6lvvb3syzodv4irpK4= +k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= +k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= +k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi 
v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= From e8654d028163a9d29d53193f4c5a672208a9539e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 May 2022 16:13:44 +0200 Subject: [PATCH 063/262] Bump ubi8/ubi-minimal from 8.5 to 8.6 (#1182) Bumps ubi8/ubi-minimal from 8.5 to 8.6. --- updated-dependencies: - dependency-name: ubi8/ubi-minimal dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.ubi8 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index c3f6b1fe1..cea894995 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -11,7 +11,7 @@ RUN make build && cp kube-bench /go/bin/kube-bench # ubi8-minimal base image for build with ubi standards -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.5 as run +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6 as run RUN microdnf install yum findutils openssl\ && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ From 9c3d2051a5ab0d3db37c8d211d5c5dc97a3a7563 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 May 2022 09:40:46 +0200 Subject: [PATCH 064/262] Bump github.com/aws/aws-sdk-go from 1.44.5 to 1.44.16 (#1184) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.5 to 1.44.16. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.5...v1.44.16) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fe3881c2f..86526604b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.5 + github.com/aws/aws-sdk-go v1.44.16 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 429a0ad6e..3dfb4c753 100644 --- a/go.sum +++ b/go.sum @@ -80,8 +80,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.5 h1:T4mckpWUfplPG4GA3FDWDCM1QaCzisjGzzeCVBhHKwQ= -github.com/aws/aws-sdk-go v1.44.5/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.16 h1:6voHuNZZNWo71MdNlym4eRlcogTeTSk9Ipo6qDJWzoU= +github.com/aws/aws-sdk-go v1.44.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 181d621456909d55c6d06d49a1ab4aa1f1278b74 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 22 May 2022 00:06:30 +0800 Subject: [PATCH 065/262] ASFF: add cluster arn to the finding ID (#1185) --- check/controls.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/check/controls.go b/check/controls.go index 060198cef..08e28ad6b 100644 --- a/check/controls.go +++ b/check/controls.go @@ -249,7 +249,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { AwsAccountId: aws.String(a), Confidence: aws.Int64(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), - Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s", arn, a, check.ID)), + Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s", arn, a, check.ID, c)), CreatedAt: aws.String(tf), Description: aws.String(check.Text), ProductArn: aws.String(arn), From 41e3e64472072a46283200cbb661a3eb061a8966 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 22 May 2022 12:44:07 +0800 Subject: [PATCH 066/262] use log instead of print (#1186) --- cmd/common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/common.go b/cmd/common.go index 20e258a0b..cf29794a5 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -133,7 +133,7 @@ func generateDefaultEnvAudit(controls *check.Controls, binSubs []string) { if len(binSubs) == 1 { binPath = binSubs[0] } else { - fmt.Printf("AuditEnv not explicit for check (%s), where bin path cannot be determined\n", checkItem.ID) + glog.V(1).Infof("AuditEnv not explicit for check (%s), where bin path cannot be determined", checkItem.ID) } if test.Env != "" && checkItem.AuditEnv == "" { From 681f8fe0db7ac98f40c11f75d181b4fc5a0c0b2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 07:53:11 +0200 Subject: [PATCH 067/262] Bump goreleaser/goreleaser-action from 2 to 3 (#1187) Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 2 to 3. - [Release notes](https://github.com/goreleaser/goreleaser-action/releases) - [Commits](https://github.com/goreleaser/goreleaser-action/compare/v2...v3) --- updated-dependencies: - dependency-name: goreleaser/goreleaser-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 48dd5ebb7..ea70425bd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -95,7 +95,7 @@ jobs: with: fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 with: distribution: goreleaser version: v1.7.0 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 59a8848d9..996930c83 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,7 +44,7 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v3 with: distribution: goreleaser version: v1.7.0 From 5685f816768828a5bb85630a8a3b95c5e96a487e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 May 2022 08:48:01 +0300 Subject: [PATCH 068/262] Bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#1194) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.11.0 to 1.12.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.11.0...v1.12.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 95 +++++++++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 75 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 86526604b..e12c4bcf3 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.11.0 + github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.5 diff --git a/go.sum b/go.sum index 3dfb4c753..bb9fcaf49 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,8 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= @@ -72,6 +74,7 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -102,6 +105,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -116,6 +120,7 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -127,6 +132,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -135,10 +141,12 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -216,6 +224,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -245,10 +255,12 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= @@ -343,21 +355,27 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -397,8 +415,9 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -408,6 +427,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -426,10 +446,10 @@ github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= -github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -443,23 +463,31 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= +github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= 
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -468,20 +496,21 @@ github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFR github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= -github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -495,8 +524,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -504,9 +533,10 @@ github.com/yuin/goldmark 
v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= +go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= +go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= +go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -631,8 +661,10 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= +golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -664,6 +696,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -691,6 +724,7 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -706,6 +740,8 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -714,6 +750,7 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -726,6 +763,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -742,8 +780,10 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -829,8 +869,9 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -867,6 +908,9 @@ google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -947,6 +991,12 @@ google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2 google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod 
h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -975,6 +1025,8 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1015,8 +1067,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.5 h1:oVLmefGqBTlgeEVG6LKnH6krOlo4TZ3Q/jIK21KUMlw= gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= From b03069171f1bce7c30610d6fef8fe3d9fcd7d4b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 31 May 2022 09:38:02 +0300 Subject: [PATCH 069/262] Bump k8s.io/client-go from 0.24.0 to 0.24.1 (#1196) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.24.0 to 0.24.1. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.24.0...v0.24.1) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index e12c4bcf3..1e1fac958 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.5 gorm.io/gorm v1.23.5 - k8s.io/client-go v0.24.0 + k8s.io/client-go v0.24.1 ) diff --git a/go.sum b/go.sum index bb9fcaf49..63844841b 100644 --- a/go.sum +++ b/go.sum @@ -1082,10 +1082,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= -k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/client-go v0.24.0 h1:lbE4aB1gTHvYFSwm6eD3OF14NhFDKCejlnsGYlSJe5U= -k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= +k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= +k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From e0704da7d086cffba684deeb60dec8d8c1c665f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Jun 2022 15:40:20 +0300 Subject: [PATCH 070/262] Bump golang from 1.18.1 to 1.18.2 (#1181) Bumps golang from 1.18.1 to 1.18.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5cd8f7115..967e0e104 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18.1 AS build +FROM golang:1.18.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index cea894995..43bd55317 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.18.1 AS build +FROM golang:1.18.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 3f85968c3c65e29fd3d534c6de52d5554449724b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Jun 2022 18:19:44 +0300 Subject: [PATCH 071/262] Bump alpine from 3.15.4 to 3.16.0 (#1193) Bumps alpine from 3.15.4 to 3.16.0. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 967e0e104..3a51e2803 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.15.4 AS run +FROM alpine:3.16.0 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From f4233254233efe8534090c052767b15f8f97d060 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 18:44:42 +0300 Subject: [PATCH 072/262] Bump gorm.io/driver/postgres from 1.3.5 to 1.3.7 (#1195) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.5 to 1.3.7. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.5...v1.3.7) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 1e1fac958..c0af75fea 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.1 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.5 + gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.5 k8s.io/client-go v0.24.1 ) diff --git a/go.sum b/go.sum index 63844841b..b8218a9ef 100644 --- a/go.sum +++ b/go.sum @@ -308,8 +308,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.12.0 h1:/RvQ24k3TnNdfBSW0ou9EOi5jx2cX7zfE8n2nLKuiP0= -github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8= +github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -340,8 +340,8 @@ github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08 github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.16.0 h1:4k1tROTJctHotannFYzu77dY3bgtMRymQP7tXQjqpPk= -github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI= +github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y= 
+github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= @@ -1070,8 +1070,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.5 h1:oVLmefGqBTlgeEVG6LKnH6krOlo4TZ3Q/jIK21KUMlw= -gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU= +gorm.io/driver/postgres v1.3.7 h1:FKF6sIMDHDEvvMF/XJvbnCl0nu6KSKUaPXevJ4r+VYQ= +gorm.io/driver/postgres v1.3.7/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.5 h1:TnlF26wScKSvknUC/Rn8t0NLLM22fypYBlvj1+aH6dM= gorm.io/gorm v1.23.5/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= From b6bc27db17926dac0996e73a9949c44cbf7c53b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 22:37:41 +0300 Subject: [PATCH 073/262] Bump golang from 1.18.2 to 1.18.3 (#1199) Bumps golang from 1.18.2 to 1.18.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 3a51e2803..e619270ca 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18.2 AS build +FROM golang:1.18.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 43bd55317..2785215d3 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.18.2 AS build +FROM golang:1.18.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 68cd8c6be55ea8116a5ca7f331d6bcc3df12f664 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 22:48:38 +0300 Subject: [PATCH 074/262] Bump github.com/stretchr/testify from 1.7.1 to 1.7.2 (#1200) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.1 to 1.7.2. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.1...v1.7.2) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c0af75fea..9987cd5e3 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.4.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.5 diff --git a/go.sum b/go.sum index b8218a9ef..d36b0d61a 100644 --- a/go.sum +++ b/go.sum @@ -522,8 +522,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= @@ -1068,8 +1069,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.7 h1:FKF6sIMDHDEvvMF/XJvbnCl0nu6KSKUaPXevJ4r+VYQ= gorm.io/driver/postgres v1.3.7/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= From f90e3a41cdbc686a6535394625e21c0ab87bd666 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jun 2022 21:58:28 +0300 Subject: [PATCH 075/262] Bump github.com/aws/aws-sdk-go from 1.44.16 to 1.44.31 (#1207) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.16 to 1.44.31. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.16...v1.44.31) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9987cd5e3..98a3499aa 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.16 + github.com/aws/aws-sdk-go v1.44.31 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index d36b0d61a..7d79b652d 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.16 h1:6voHuNZZNWo71MdNlym4eRlcogTeTSk9Ipo6qDJWzoU= -github.com/aws/aws-sdk-go v1.44.16/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.31 h1:nr3c7pbJbFslL+U7JrM7knBxzksnK52qIZoYX8uk2I4= +github.com/aws/aws-sdk-go v1.44.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From ebdfba55ccba3f3fc9b32db3203b11e3ac1c66e4 Mon Sep 17 00:00:00 2001 From: rhtenhove Date: Fri, 17 Jun 2022 11:01:32 -0400 Subject: [PATCH 076/262] fix: fully qualified image names (#1206) --- docs/installation.md | 2 +- docs/running.md | 28 ++++++++++++++-------------- hack/kind.yaml | 2 +- job-ack.yaml | 12 ++++++++++-- job-aks.yaml | 5 +++-- job-eks-asff.yaml | 13 +++++++++++-- job-eks.yaml | 12 ++++++++++-- job-gke.yaml | 12 ++++++++++-- job-iks.yaml | 5 +++-- job-master.yaml | 2 +- job-node.yaml | 2 +- job.yaml | 2 +- 12 files changed, 66 insertions(+), 31 deletions(-) diff --git a/docs/installation.md b/docs/installation.md index 6d42c0c38..373a7c5fe 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -84,7 +84,7 @@ go build -o kube-bench . This command copies the kube-bench binary and configuration files to your host from the Docker container: **binaries compiled for linux-x86-64 only (so they won't run on macOS or Windows)** ``` -docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install +docker run --rm -v `pwd`:/host docker.io/aquasec/kube-bench:latest install ``` You can then run `./kube-bench`. diff --git a/docs/running.md b/docs/running.md index 0c7889f04..04af7bb50 100644 --- a/docs/running.md +++ b/docs/running.md @@ -15,19 +15,19 @@ It is impossible to inspect the master nodes of managed clusters, e.g. GKE, EKS, You can avoid installing kube-bench on the host by running it inside a container using the host PID namespace and mounting the `/etc` and `/var` directories where the configuration and other files are located on the host so that kube-bench can check their existence and permissions. 
``` -docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t aquasec/kube-bench:latest --version 1.18 +docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t docker.io/aquasec/kube-bench:latest --version 1.18 ``` > Note: the tests require either the kubelet or kubectl binary in the path in order to auto-detect the Kubernetes version. You can pass `-v $(which kubectl):/usr/local/mount-from-host/bin/kubectl` to resolve this. You will also need to pass in kubeconfig credentials. For example: ``` -docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config -t aquasec/kube-bench:latest +docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config -t docker.io/aquasec/kube-bench:latest ``` You can use your own configs by mounting them over the default ones in `/opt/kube-bench/cfg/` ``` -docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config aquasec/kube-bench:latest +docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config docker.io/aquasec/kube-bench:latest ``` ### Running in a Kubernetes cluster @@ -72,7 +72,7 @@ could open nsg 22 port and assign a public ip for one agent node (only for testi 1. Run CIS benchmark to view results: ``` -docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install +docker run --rm -v `pwd`:/host docker.io/aquasec/kube-bench:latest install ./kube-bench ``` kube-bench cannot be run on AKS master nodes @@ -107,9 +107,9 @@ docker push .dkr.ecr..amazonaws.com/k8s/kube-bench: ### Running on OpenShift | OpenShift Hardening Guide | kube-bench config | -|---|---| -| ocp-3.10 +| rh-0.7 | -| ocp-4.1 +| rh-1.0 | +| ------------------------- | ----------------- | +| ocp-3.10 + | rh-0.7 | +| ocp-4.1 + | rh-1.0 | kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 4.1. To run this you will need to specify `--benchmark rh-07`, or `--version ocp-3.10` or,`--version ocp-4.5` or `--benchmark rh-1.0` @@ -117,10 +117,10 @@ kube-bench includes a set of test files for Red Hat's OpenShift hardening guide ### Running in a GKE cluster -| CIS Benchmark | Targets | -|---|---| -| gke-1.0| master, controlplane, node, etcd, policies, managedservices | -| gke-1.2.0| master, controlplane, node, policies, managedservices | +| CIS Benchmark | Targets | +| ------------- | ----------------------------------------------------------- | +| gke-1.0 | master, controlplane, node, etcd, policies, managedservices | +| gke-1.2.0 | master, controlplane, node, policies, managedservices | kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0` or `--benchmark gke-1.2.0` when you run the `kube-bench` command. 
@@ -132,9 +132,9 @@ kubectl apply -f job-gke.yaml ### Running in a ACK cluster -| CIS Benchmark | Targets | -|---|---| -| ack-1.0| master, controlplane, node, etcd, policies, managedservices | +| CIS Benchmark | Targets | +| ------------- | ----------------------------------------------------------- | +| ack-1.0 | master, controlplane, node, etcd, policies, managedservices | kube-bench includes benchmarks for Alibaba Cloud Container Service For Kubernetes (ACK). To run this you will need to specify `--benchmark ack-1.0` when you run the `kube-bench` command. diff --git a/hack/kind.yaml b/hack/kind.yaml index 0cf3888d4..7f088f150 100644 --- a/hack/kind.yaml +++ b/hack/kind.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:${VERSION} + image: docker.io/aquasec/kube-bench:${VERSION} command: ["kube-bench"] volumeMounts: - name: var-lib-etcd diff --git a/job-ack.yaml b/job-ack.yaml index ecc18199c..f34f6dd04 100644 --- a/job-ack.yaml +++ b/job-ack.yaml @@ -9,8 +9,16 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "ack-1.0"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies,managedservices", + "--benchmark", + "ack-1.0", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-aks.yaml b/job-aks.yaml index 329c86b2c..652a4a5ef 100644 --- a/job-aks.yaml +++ b/job-aks.yaml @@ -9,8 +9,9 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "aks-1.0"] + image: docker.io/aquasec/kube-bench:latest + command: + ["kube-bench", "run", "--targets", "node", "--benchmark", "aks-1.0"] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 426c5484a..814cd1b48 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -32,8 +32,17 @@ spec: - name: kube-bench # Push the image to your ECR and then refer to it here # image: - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0.1", "--asff"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node", + "--benchmark", + "eks-1.0.1", + "--asff", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-eks.yaml b/job-eks.yaml index ed269e06a..5a946fa55 100644 --- a/job-eks.yaml +++ b/job-eks.yaml @@ -11,9 +11,17 @@ spec: - name: kube-bench # Push the image to your ECR and then refer to it here # image: - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0.1"] + command: + [ + "kube-bench", + "run", + "--targets", + "node", + "--benchmark", + "eks-1.0.1", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-gke.yaml b/job-gke.yaml index 2e6d1e956..c87b18492 100644 --- a/job-gke.yaml +++ b/job-gke.yaml @@ -9,8 +9,16 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "gke-1.2.0"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + 
"kube-bench", + "run", + "--targets", + "node,policies,managedservices", + "--benchmark", + "gke-1.2.0", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-iks.yaml b/job-iks.yaml index a67ad407f..f8092d094 100644 --- a/job-iks.yaml +++ b/job-iks.yaml @@ -9,8 +9,9 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--version", "1.20"] + image: docker.io/aquasec/kube-bench:latest + command: + ["kube-bench", "run", "--targets", "node", "--version", "1.20"] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-master.yaml b/job-master.yaml index e3be12b58..ee30cb8ea 100644 --- a/job-master.yaml +++ b/job-master.yaml @@ -15,7 +15,7 @@ spec: effect: NoSchedule containers: - name: kube-bench - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "master"] volumeMounts: - name: var-lib-etcd diff --git a/job-node.yaml b/job-node.yaml index b45231775..0e6f9a9f9 100644 --- a/job-node.yaml +++ b/job-node.yaml @@ -9,7 +9,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "node"] volumeMounts: - name: var-lib-etcd diff --git a/job.yaml b/job.yaml index 201420e31..96dcaf482 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:v0.6.8 + image: docker.io/aquasec/kube-bench:v0.6.8 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From ed5ebc57406c62573db472a172400bd740c6cf31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jun 2022 18:17:57 +0300 Subject: [PATCH 077/262] Bump crazy-max/ghaction-docker-meta from 3 to 4 (#1172) Bumps [crazy-max/ghaction-docker-meta](https://github.com/crazy-max/ghaction-docker-meta) from 3 to 4. - [Release notes](https://github.com/crazy-max/ghaction-docker-meta/releases) - [Upgrade guide](https://github.com/docker/metadata-action/blob/master/UPGRADE.md) - [Commits](https://github.com/crazy-max/ghaction-docker-meta/compare/v3...v4) --- updated-dependencies: - dependency-name: crazy-max/ghaction-docker-meta dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 268a23b44..c32ff3a92 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -41,7 +41,7 @@ jobs: password: ${{ secrets.ECR_SECRET_ACCESS_KEY }} - name: Get version id: get_version - uses: crazy-max/ghaction-docker-meta@v3 + uses: crazy-max/ghaction-docker-meta@v4 with: images: ${{ env.REP }} tag-semver: | From ce53cffc70ccc2be93a2896ba2d11b0608372003 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 17 Jun 2022 18:42:00 +0300 Subject: [PATCH 078/262] Bump actions/setup-python from 3 to 4 (#1208) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 3 to 4. 
- [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mkdocs-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index a55bcd5a2..8e22cc71a 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -20,7 +20,7 @@ jobs: with: fetch-depth: 0 persist-credentials: true - - uses: actions/setup-python@v3 + - uses: actions/setup-python@v4 with: python-version: 3.x - run: | From 907d952fb3cfd7ed1ee8d149d0944823fb8d0141 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 19 Jun 2022 16:48:40 +0800 Subject: [PATCH 079/262] ASFF: add node name to the finding id (#1214) --- check/controls.go | 15 ++++++++++----- job-eks-asff.yaml | 5 +++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/check/controls.go b/check/controls.go index 08e28ad6b..95c287e2e 100644 --- a/check/controls.go +++ b/check/controls.go @@ -208,11 +208,11 @@ func (controls *Controls) JUnit() ([]byte, error) { // ASFF encodes the results of last run to AWS Security Finding Format(ASFF). func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { fs := []*securityhub.AwsSecurityFinding{} - a, err := getConfig("AWS_ACCOUNT") + account, err := getConfig("AWS_ACCOUNT") if err != nil { return nil, err } - c, err := getConfig("CLUSTER_ARN") + cluster, err := getConfig("CLUSTER_ARN") if err != nil { return nil, err } @@ -220,6 +220,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { if err != nil { return nil, err } + nodeName, _ := getConfig("NODE_NAME") arn := fmt.Sprintf(ARN, region) ti := time.Now() @@ -244,12 +245,16 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { if len(check.Reason) > 1024 { reason = check.Reason[0:1023] } + id := aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s", arn, account, check.ID, cluster)) + if nodeName != "" { + id = aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s+%s", arn, account, check.ID, cluster, nodeName)) + } f := securityhub.AwsSecurityFinding{ - AwsAccountId: aws.String(a), + AwsAccountId: aws.String(account), Confidence: aws.Int64(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), - Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s", arn, a, check.ID, c)), + Id: id, CreatedAt: aws.String(tf), Description: aws.String(check.Text), ProductArn: aws.String(arn), @@ -274,7 +279,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { }, Resources: []*securityhub.Resource{ { - Id: aws.String(c), + Id: aws.String(cluster), Type: aws.String(TYPE), }, }, diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 814cd1b48..5e079cc2a 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -43,6 +43,11 @@ spec: "eks-1.0.1", "--asff", ] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet From e6b3eddb03613d60e750fbb24c4aeeb4cc5eefdd Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 19 Jun 2022 22:10:37 +0800 Subject: [PATCH 080/262] fix 4.2.11 in 
cis-1.20 should be Automated (#1213) --- cfg/cis-1.20/node.yaml | 4 ++-- integration/testdata/Expected_output.data | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cfg/cis-1.20/node.yaml b/cfg/cis-1.20/node.yaml index aa12820d0..56081ddad 100644 --- a/cfg/cis-1.20/node.yaml +++ b/cfg/cis-1.20/node.yaml @@ -388,7 +388,7 @@ groups: scored: false - id: 4.2.11 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: @@ -412,7 +412,7 @@ groups: Based on your system, restart the kubelet service. For example: systemctl daemon-reload systemctl restart kubelet.service - scored: false + scored: true - id: 4.2.12 text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" diff --git a/integration/testdata/Expected_output.data b/integration/testdata/Expected_output.data index 1dd154e0d..0df5ce0b3 100644 --- a/integration/testdata/Expected_output.data +++ b/integration/testdata/Expected_output.data @@ -240,7 +240,7 @@ minimum. [PASS] 4.2.8 Ensure that the --hostname-override argument is not set (Manual) [WARN] 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual) [WARN] 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual) -[PASS] 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual) +[PASS] 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated) [PASS] 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual) [WARN] 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) From 7078772f6d475bcefb8178c34fc8297624b3cd34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 11:35:24 +0300 Subject: [PATCH 081/262] Bump gorm.io/gorm from 1.23.5 to 1.23.6 (#1209) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.5 to 1.23.6. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.5...v1.23.6) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 98a3499aa..0baf38a07 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.7.2 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 - gorm.io/gorm v1.23.5 + gorm.io/gorm v1.23.6 k8s.io/client-go v0.24.1 ) diff --git a/go.sum b/go.sum index 7d79b652d..e1ddcaae8 100644 --- a/go.sum +++ b/go.sum @@ -1075,8 +1075,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.7 h1:FKF6sIMDHDEvvMF/XJvbnCl0nu6KSKUaPXevJ4r+VYQ= gorm.io/driver/postgres v1.3.7/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.5 h1:TnlF26wScKSvknUC/Rn8t0NLLM22fypYBlvj1+aH6dM= -gorm.io/gorm v1.23.5/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.6 h1:KFLdNgri4ExFFGTRGGFWON2P1ZN28+9SJRN8voOoYe0= +gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 3321b2d12999b6d3f086c66abe578f91b2951a85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 11:48:30 +0300 Subject: [PATCH 082/262] build(deps): bump k8s.io/client-go from 0.24.1 to 0.24.2 (#1215) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.24.1 to 0.24.2. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.24.1...v0.24.2) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 0baf38a07..b66a5beea 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.6 - k8s.io/client-go v0.24.1 + k8s.io/client-go v0.24.2 ) diff --git a/go.sum b/go.sum index e1ddcaae8..5af9195b3 100644 --- a/go.sum +++ b/go.sum @@ -1084,10 +1084,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= -k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= -k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= +k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= +k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= +k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From 3c3cffa5ae79efb4e5c0fa6a7ac9f1a608a7941a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 12:59:28 +0300 Subject: [PATCH 083/262] build(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#1218) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.4.0 to 1.5.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.4.0...v1.5.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index b66a5beea..d919f16ee 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.4.0 + github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.7.2 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 5af9195b3..15692a74d 100644 --- a/go.sum +++ b/go.sum @@ -113,7 +113,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -503,8 +503,8 @@ github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From af7e0c0f0bb7b6d7351bb5954734b8a6709c9ccd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 28 Jun 2022 13:26:21 +0300 Subject: [PATCH 084/262] build(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#1219) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.2 to 1.7.5. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.2...v1.7.5) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d919f16ee..02a664c18 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.7.5 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.6 diff --git a/go.sum b/go.sum index 15692a74d..c2722c3cb 100644 --- a/go.sum +++ b/go.sum @@ -514,8 +514,9 @@ github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiu github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -523,8 +524,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= From 7a68b387634f8e975fc5df2f869c4279baaf6d52 Mon Sep 17 00:00:00 2001 From: Anupam Tamrakar Date: Tue, 9 Aug 2022 00:24:37 +0530 Subject: [PATCH 085/262] Updating checks 4.2.1 and 4.2.3 (#1236) Removing colon from these checks so that grep command will work with both communication method (YAML and JSON) --- cfg/rh-1.0/node.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index 80dbd68b9..7b5fab12f 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -186,7 +186,7 @@ groups: audit: | for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') do - oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous: /etc/kubernetes/kubelet.conf + oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf done use_multiple_values: true tests: @@ -222,7 +222,7 @@ groups: audit: | for node in $(oc get 
nodes -o jsonpath='{.items[*].metadata.name}') do - oc debug node/${node} -- chroot /host grep clientCAFile: /etc/kubernetes/kubelet.conf + oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf done 2> /dev/null use_multiple_values: true tests: From eaa84fc34e49b0e79038c34287dea27c4ea7e5e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 10:41:18 -0300 Subject: [PATCH 086/262] build(deps): bump github.com/aws/aws-sdk-go from 1.44.31 to 1.44.71 (#1238) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.31 to 1.44.71. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.31...v1.44.71) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 02a664c18..884d4f7eb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.31 + github.com/aws/aws-sdk-go v1.44.71 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index c2722c3cb..8e2268ed6 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.31 h1:nr3c7pbJbFslL+U7JrM7knBxzksnK52qIZoYX8uk2I4= -github.com/aws/aws-sdk-go v1.44.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.71 h1:e5ZbeFAdDB9i7NcQWdmIiA/NOC4aWec3syOUtUE0dBA= +github.com/aws/aws-sdk-go v1.44.71/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 7b9ec26432868e7c7aeeb0d34f1825ee58c6161b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 11:21:11 -0300 Subject: [PATCH 087/262] build(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#1226) Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.7.5 to 1.8.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.7.5...v1.8.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 884d4f7eb..a196fe183 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.5.0 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.5 + github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.6 diff --git a/go.sum b/go.sum index 8e2268ed6..1d9d5369f 100644 --- a/go.sum +++ b/go.sum @@ -524,8 +524,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5 h1:s5PTfem8p8EbKQOctVV53k6jCJt3UX4IEJzwh+C324Q= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= From 670782c47b725dbc41ff37a1f2e87095e5d4a4bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 11:31:36 -0300 Subject: [PATCH 088/262] build(deps): bump gorm.io/gorm from 1.23.6 to 1.23.8 (#1227) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.6 to 1.23.8. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.6...v1.23.8) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a196fe183..693aea955 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 - gorm.io/gorm v1.23.6 + gorm.io/gorm v1.23.8 k8s.io/client-go v0.24.2 ) diff --git a/go.sum b/go.sum index 1d9d5369f..9d2e5a878 100644 --- a/go.sum +++ b/go.sum @@ -1076,8 +1076,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.7 h1:FKF6sIMDHDEvvMF/XJvbnCl0nu6KSKUaPXevJ4r+VYQ= gorm.io/driver/postgres v1.3.7/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI= gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.6 h1:KFLdNgri4ExFFGTRGGFWON2P1ZN28+9SJRN8voOoYe0= -gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE= +gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From ddae7417efe1decf0502e23b80e8b0d7e1c7839e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 11:54:33 -0300 Subject: [PATCH 089/262] build(deps): bump k8s.io/client-go from 0.24.2 to 0.24.3 (#1230) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.24.2 to 0.24.3. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.24.2...v0.24.3) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 693aea955..666c1c480 100644 --- a/go.mod +++ b/go.mod @@ -15,5 +15,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.7 gorm.io/gorm v1.23.8 - k8s.io/client-go v0.24.2 + k8s.io/client-go v0.24.3 ) diff --git a/go.sum b/go.sum index 9d2e5a878..3d8f684c1 100644 --- a/go.sum +++ b/go.sum @@ -1085,10 +1085,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= -k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= +k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI= +k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY= +k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= From 1c46d386a8131ff4fe91d28a435fde058e24b993 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 14:00:09 -0300 Subject: [PATCH 090/262] build(deps): bump alpine from 3.16.0 to 3.16.2 (#1240) Bumps alpine from 3.16.0 to 3.16.2. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index e619270ca..c5ce7ee9b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.16.0 AS run +FROM alpine:3.16.2 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From c4e904d90912181305b5852cfa2821bbf310bdc1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 14:10:22 -0300 Subject: [PATCH 091/262] build(deps): bump gorm.io/driver/postgres from 1.3.7 to 1.3.8 (#1224) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.7 to 1.3.8. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.7...v1.3.8) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 666c1c480..1d7bfb662 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.7 + gorm.io/driver/postgres v1.3.8 gorm.io/gorm v1.23.8 k8s.io/client-go v0.24.3 ) diff --git a/go.sum b/go.sum index 3d8f684c1..83e94d499 100644 --- a/go.sum +++ b/go.sum @@ -1073,9 +1073,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.7 h1:FKF6sIMDHDEvvMF/XJvbnCl0nu6KSKUaPXevJ4r+VYQ= -gorm.io/driver/postgres v1.3.7/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI= -gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/driver/postgres v1.3.8 h1:8bEphSAB69t3odsCR4NDzt581iZEWQuRM27Cg6KgfPY= +gorm.io/driver/postgres v1.3.8/go.mod h1:qB98Aj6AhRO/oyu/jmZsi/YM9g6UzVCjMxO/6frFvcA= +gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE= gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 7146b65da42c44989178e431d099d5dec85e5b39 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Aug 2022 14:17:46 -0300 Subject: [PATCH 092/262] build(deps): bump golang from 1.18.3 to 1.19.0 (#1237) Bumps golang from 1.18.3 to 1.19.0. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index c5ce7ee9b..1095c00c2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.18.3 AS build +FROM golang:1.19.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 2785215d3..6e291b5fb 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.18.3 AS build +FROM golang:1.19.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 93a167a9177ff300a0e25da91a746f2888206ac6 Mon Sep 17 00:00:00 2001 From: Jose Donizetti Date: Wed, 10 Aug 2022 14:38:24 -0300 Subject: [PATCH 093/262] release: prepare v0.6.9 (#1241) Signed-off-by: Jose Donizetti Signed-off-by: Jose Donizetti --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 96dcaf482..d7d65543b 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.8 + image: docker.io/aquasec/kube-bench:v0.6.9 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 113ee337ee900f2e5b65f4471420a5fa08a94e97 Mon Sep 17 00:00:00 2001 From: Dmitriy Dubson Date: Sun, 21 Aug 2022 02:25:15 -0400 Subject: [PATCH 094/262] Update job-master.yaml for K8s 1.24.x labels/tolerations (#1250) (#1251) --- job-master.yaml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/job-master.yaml b/job-master.yaml index ee30cb8ea..34f24f2f1 100644 --- a/job-master.yaml +++ b/job-master.yaml @@ -7,12 +7,23 @@ spec: template: spec: hostPID: true - nodeSelector: - node-role.kubernetes.io/master: "" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists tolerations: - key: node-role.kubernetes.io/master operator: Exists effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule containers: - name: kube-bench image: docker.io/aquasec/kube-bench:latest From 9e95301f091701640851d30bc9b2f6085ee50419 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 4 Sep 2022 11:39:00 +0300 Subject: [PATCH 095/262] build(deps): bump github.com/aws/aws-sdk-go from 1.44.71 to 1.44.86 (#1264) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.71 to 1.44.86. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.71...v1.44.86) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1d7bfb662..315805125 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.71 + github.com/aws/aws-sdk-go v1.44.86 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 83e94d499..23b77ff3d 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.71 h1:e5ZbeFAdDB9i7NcQWdmIiA/NOC4aWec3syOUtUE0dBA= -github.com/aws/aws-sdk-go v1.44.71/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.86 h1:Zls97WY9N2c2H85//B88CmSlYYNxS3Zf3k4ds5zAf5A= +github.com/aws/aws-sdk-go v1.44.86/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From 59760891bdeab60af77de7bdcd0f60e11dd1c2ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Sep 2022 12:26:26 +0300 Subject: [PATCH 096/262] build(deps): bump github.com/aws/aws-sdk-go from 1.44.86 to 1.44.91 (#1269) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.44.86 to 1.44.91. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Changelog](https://github.com/aws/aws-sdk-go/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.44.86...v1.44.91) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 315805125..548cb1e5b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.16 require ( - github.com/aws/aws-sdk-go v1.44.86 + github.com/aws/aws-sdk-go v1.44.91 github.com/fatih/color v1.13.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/magiconair/properties v1.8.6 diff --git a/go.sum b/go.sum index 23b77ff3d..edcbf4427 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.86 h1:Zls97WY9N2c2H85//B88CmSlYYNxS3Zf3k4ds5zAf5A= -github.com/aws/aws-sdk-go v1.44.86/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.91 h1:SRWmuX7PTyhBdLuvSfM7KWrWISJsrRsUPcFDSFduRxY= +github.com/aws/aws-sdk-go v1.44.91/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= From e05dabe0a912514046975d69ada6a9838ce8b186 Mon Sep 17 00:00:00 2001 From: chenk Date: Wed, 14 Sep 2022 16:59:58 +0300 Subject: [PATCH 097/262] chore: replace gclib alpine pkg (#1280) * chore: replace gclib alpine pkg Signed-off-by: chenk * chore: replace gclib alpine pkg Signed-off-by: chenk Signed-off-by: chenk --- Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1095c00c2..676f0fa4c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,8 +26,7 @@ RUN apk update && apk upgrade && apk --no-cache add openssl # Add glibc for running oc command RUN wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub -RUN wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.33-r0/glibc-2.33-r0.apk -RUN apk add glibc-2.33-r0.apk +RUN apk add gcompat RUN apk add jq ENV PATH=$PATH:/usr/local/mount-from-host/bin From a34047c1058b78b9dc84615fe61aa86e5a30d3f2 Mon Sep 17 00:00:00 2001 From: Chris Renzo <86670002+crenzoaws@users.noreply.github.com> Date: Wed, 14 Sep 2022 10:40:48 -0400 Subject: [PATCH 098/262] Adding eks-stig-kubernetes-v1r6 (#1266) * Adding eks-stig-kubernetes-v1r6 * Fixing lint errors * Reformatting texts * Removing pinned docker tag * Updating Expected Stig Output Co-authored-by: EC2 Default User --- CONTRIBUTING.md | 1 + cfg/config.yaml | 5 + cfg/eks-stig-kubernetes-v1r6/config.yaml | 9 + .../controlplane.yaml | 124 ++++++++ .../managedservices.yaml | 268 ++++++++++++++++ cfg/eks-stig-kubernetes-v1r6/master.yaml | 6 + cfg/eks-stig-kubernetes-v1r6/node.yaml | 287 ++++++++++++++++++ cfg/eks-stig-kubernetes-v1r6/policies.yaml | 33 ++ docs/architecture.md | 6 + docs/platforms.md | 6 +- docs/running.md | 27 +- hack/kind-stig.test.yaml | 56 ++++ hack/kind-stig.yaml | 56 ++++ 
.../testdata/Expected_output_stig.data | 266 ++++++++++++++++ job-eks-stig.yaml | 43 +++ makefile | 12 + 16 files changed, 1203 insertions(+), 2 deletions(-) create mode 100644 cfg/eks-stig-kubernetes-v1r6/config.yaml create mode 100644 cfg/eks-stig-kubernetes-v1r6/controlplane.yaml create mode 100644 cfg/eks-stig-kubernetes-v1r6/managedservices.yaml create mode 100644 cfg/eks-stig-kubernetes-v1r6/master.yaml create mode 100644 cfg/eks-stig-kubernetes-v1r6/node.yaml create mode 100644 cfg/eks-stig-kubernetes-v1r6/policies.yaml create mode 100644 hack/kind-stig.test.yaml create mode 100644 hack/kind-stig.yaml create mode 100644 integration/testdata/Expected_output_stig.data create mode 100644 job-eks-stig.yaml diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66c3e17c7..f4b6f1eb5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,3 +77,4 @@ Finally, we can use the `make kind-run` target to run the current version of kub Every time you want to test a change, you'll need to rebuild the docker image and push it to cluster before running it again. ( `make build-docker kind-push kind-run` ) +To run the STIG tests locally execute the following: `make build-docker kind-push kind-run-stig` diff --git a/cfg/config.yaml b/cfg/config.yaml index fcb8bd84c..bb0e5f3ce 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -324,3 +324,8 @@ target_mapping: - "controlplane" - "policies" - "etcd" + "eks-stig-kubernetes-v1r6": + - "node" + - "controlplane" + - "policies" + - "managedservices" diff --git a/cfg/eks-stig-kubernetes-v1r6/config.yaml b/cfg/eks-stig-kubernetes-v1r6/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml b/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml new file mode 100644 index 000000000..cc3815974 --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml @@ -0,0 +1,124 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "DISA Category Code I" + checks: + - id: V-242390 + text: "The Kubernetes API server must have anonymous authentication disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242400 + text: "The Kubernetes API server must have Alpha APIs disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "AllAlpha=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or $kubeletconf that contain the feature-gates + setting with AllAlpha set to "true". + Set the flag to "false" or remove the "AllAlpha" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + - id: 2.2 + text: "DISA Category Code II" + checks: + - id: V-242381 + text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + - id: V-242402 + text: "The Kubernetes API Server must have an audit log path set (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242403 + text: "Kubernetes API Server must generate audit records (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242461 + text: "Kubernetes API Server audit logs must be enabled. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242462 + text: "The Kubernetes API Server must be set to audit log max size. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242463 + text: "The Kubernetes API Server must be set to audit log maximum backup. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242464 + text: "The Kubernetes API Server audit log retention must be set. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: V-242465 + text: "The Kubernetes API Server audit log path must be set. (Manual)" + type: "manual" + remediation: | + Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. 
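Several of the checks in this group share the same remediation of enabling EKS control plane logging. As a minimal sketch, assuming the AWS CLI is configured and the cluster name and region placeholders are filled in, enabling all five log types on an existing cluster could look like:
```
# Sketch: turn on all five EKS control plane log types for an existing cluster.
# <cluster-name> and <region> are placeholders.
aws eks update-cluster-config \
  --region <region> \
  --name <cluster-name> \
  --logging '{"clusterLogging":[{"types":["api","audit","authenticator","controllerManager","scheduler"],"enabled":true}]}'
```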
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + scored: false + - id: 2.2 + text: "DISA Category Code II" + checks: + - id: V-242443 + text: " Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)" + type: "manual" + remediation: | + Upgrade Kubernetes to a supported version. + Ref: https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html diff --git a/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml b/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml new file mode 100644 index 000000000..23c4eaa9f --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml @@ -0,0 +1,268 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "DISA Category Code I" + checks: + - id: V-242386 + text: "The Kubernetes API server must have the insecure port flag disabled | Component of EKS Control Plane" + type: "skip" + + - id: V-242388 + text: "The Kubernetes API server must have the insecure bind address not set | Component of EKS Control Plane" + type: "skip" + + - id: V-242436 + text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual)" + type: "manual" + remediation: | + Amazon EKS version 1.18 and later automatically enable ValidatingAdmissionWebhook + Ref: https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + scored: false + + - id: V-245542 + text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane" + type: "skip" + + - id: 5.2 + text: "DISA Category Code II" + checks: + - id: V-242376 + text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of EKS Control Plane" + type: "skip" + + - id: V-242377 + text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of EKS Control Plane" + type: "skip" + + - id: V-242378 + text: "The Kubernetes API Server must use TLS 1.2, at a minimum | Component of EKS Control Plane" + type: "skip" + + - id: V-242379 + text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane" + type: "skip" + + - id: V-242380 + text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane" + type: "skip" + + - id: V-242382 + text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of EKS Control Plane" + type: "skip" + + - id: V-242384 + text: "The Kubernetes Scheduler must have secure binding | Component of EKS Control Plane" + type: "skip" + + - id: V-242385 + text: "The Kubernetes Controller Manager must have secure binding | Component of EKS Control Plane" + type: "skip" + + - id: V-242389 + text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane" + type: "skip" + + - id: V-242401 + text: "The Kubernetes API Server must have an audit policy set | Component of EKS Control Plane" + type: "skip" + + - id: V-242402 + text: "The Kubernetes API Server must have an audit log path set | Component of EKS Control Plane" + type: "skip" + + - id: V-242403 + text: "Kubernetes API Server must generate audit records | Component of EKS Control Plane" + type: "skip" + + - id: V-242405 + text: "The Kubernetes manifests must be owned by root | Component of EKS Control Plane" + type: "skip" + + 
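The long run of `type: "skip"` entries in this file marks controls that belong to the EKS managed control plane and so cannot be audited from inside the cluster. As an aside, here is a sketch of restricting a run to the sections kube-bench can evaluate itself, using the `--benchmark` flag shown in the job manifests in this patch together with kube-bench's `--targets` flag:
```
# Sketch: report only the sections that are auditable from a worker node,
# leaving the skipped control plane items to the managed service.
kube-bench run --benchmark eks-stig-kubernetes-v1r6 --targets node,policies
```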
- id: V-242408 + text: "The Kubernetes manifests must have least privileges | Component of EKS Control Plane" + type: "skip" + + - id: V-242409 + text: "Kubernetes Controller Manager must disable profiling | Component of EKS Control Plane" + type: "skip" + + - id: V-242410 + text: "The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242411 + text: "The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242412 + text: "The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242413 + text: "The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242418 + text: "The Kubernetes API server must use approved cipher suites | Component of EKS Control Plane" + type: "skip" + + - id: V-242419 + text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242420 + text: "Kubernetes Kubelet must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242421 + text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242422 + text: "Kubernetes API Server must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242423 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242424 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242425 + text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242426 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242427 + text: "Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242428 + text: "Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242429 + text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242430 + text: "Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242431 + text: "Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242432 + text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242433 + text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242438 + text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of EKS Control Plane" + type: "skip" + + - id: V-242444 + text: "The Kubernetes component manifests must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242445 + text: "The Kubernetes component etcd must be owned by etcd | Component of EKS Control Plane" + type: "skip" + + - id: V-242446 + text: 
"The Kubernetes conf files must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242447 + text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242448 + text: "The Kubernetes Kube Proxy must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242449 + text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242450 + text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242451 + text: "The Kubernetes component PKI must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242452 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242453 + text: "The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242454 + text: "The Kubernetes kubeadm.conf must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242455 + text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242456 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242457 + text: "The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242458 + text: "The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242459 + text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242460 + text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242466 + text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242467 + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242468 + text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane" + type: "skip" + + - id: V-245541 + text: "Kubernetes Kubelet must not disable timeouts | Component of EKS Control Plane" + type: "skip" + + - id: V-245543 + text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of EKS Control Plane" + type: "skip" + + - id: V-245544 + text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of EKS Control Plane" + type: "skip" diff --git a/cfg/eks-stig-kubernetes-v1r6/master.yaml b/cfg/eks-stig-kubernetes-v1r6/master.yaml new file mode 100644 index 000000000..8152a1abc --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 1 +text: "Control Plane Components" +type: "master" diff --git 
a/cfg/eks-stig-kubernetes-v1r6/node.yaml b/cfg/eks-stig-kubernetes-v1r6/node.yaml new file mode 100644 index 000000000..960912411 --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/node.yaml @@ -0,0 +1,287 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "DISA Category Code I" + checks: + - id: V-242387 # CIS 3.2.4 + text: "The Kubernetes Kubelet must have the read-only port flag disabled (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit $kubeletconf to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: V-242391 # CIS 3.2.1 + text: "The Kubernetes Kubelet must have anonymous authentication disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242392 # CIS 3.2.2 + text: "The Kubernetes kubelet must enable explicit authorization (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242397 + text: "The Kubernetes kubelet static PodPath must not enable static pods (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - path: '{.staticPodPath}' + set: false + remediation: | + Edit $kubeletconf on each node to to remove the staticPodPath + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242415 + text: "Secrets in Kubernetes must not be stored as environment variables.(Manual)" + type: "manual" + remediation: | + Run the following command: + kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A + If any of the values returned reference environment variables + rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + - id: V-242434 # CIS 3.2.6 + text: "Kubernetes Kubelet must enable kernel protection (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit $kubeletconf to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242435 + text: "Kubernetes must prevent non-privileged users from executing privileged functions (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit $kubeletconf to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242393 + text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)" + audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' ' + tests: + test_items: + - flag: ActiveState + compare: + op: eq + value: inactive + remediation: | + To stop the sshd service, run the command: systemctl stop sshd + scored: true + - id: V-242394 + text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)" + audit: "/bin/sh -c 'systemctl is-enabled sshd.service'" + tests: + test_items: + - flag: "disabled" + remediation: | + To disable the sshd service, run the command: + chkconfig sshd off + scored: true + - id: V-242395 + text: "Kubernetes dashboard must not be enabled. (Manual)" + type: "manual" + remediation: | + Run the command: kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard + If any resources are returned, this is a finding. + Fix Text: Delete the Kubernetes dashboard deployment with the following command: + kubectl delete deployment kubernetes-dashboard --namespace=kube-system + scored: false + - id: V-242398 + text: "Kubernetes DynamicAuditing must not be enabled. 
(Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "DynamicAuditing=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or kubelet config files that contain the feature-gates + setting with DynamicAuditing set to "true". + Set the flag to "false" or remove the "DynamicAuditing" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + - id: V-242399 + text: "Kubernetes DynamicKubeletConfig must not be enabled. (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "DynamicKubeletConfig=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or $kubeletconf that contain the feature-gates + setting with DynamicKubeletConfig set to "true". + Set the flag to "false" or remove the "DynamicKubeletConfig" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + - id: V-242404 # CIS 3.2.8 + text: "Kubernetes Kubelet must deny hostname override (Automated)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletbin + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242406 + text: "The Kubernetes kubelet configuration file must be owned by root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + - id: V-242407 + text: "The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + - id: V-242414 + text: "The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)" + type: "manual" + remediation: | + For any of the pods that are using ports below 1024, + reconfigure the pod to use a service to map a host non-privileged + port to the pod port or reconfigure the image to use non-privileged ports. + scored: false + - id: V-242442 + text: "Kubernetes must remove old components after updated versions have been installed. 
(Manual)" + type: "manual" + remediation: | + To view all pods and the images used to create the pods, from the Master node, run the following command: + kubectl get pods --all-namespaces -o jsonpath="{..image}" | \ + tr -s '[[:space:]]' '\n' | \ + sort | \ + uniq -c + Review the images used for pods running within Kubernetes. + Remove any old pods that are using older images. + scored: false + - id: V-242396 + text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" + type: "manual" + remediation: | + If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. + Upgrade the Master and Worker nodes to the latest version of kubectl. + scored: false diff --git a/cfg/eks-stig-kubernetes-v1r6/policies.yaml b/cfg/eks-stig-kubernetes-v1r6/policies.yaml new file mode 100644 index 000000000..e91eacc4f --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/policies.yaml @@ -0,0 +1,33 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "Policies - DISA Category Code I" + checks: + - id: V-242381 + text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: V-242383 + text: "User-managed resources must be created in dedicated namespaces. (Manual)" + type: "manual" + remediation: | + Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. + scored: false + + - id: V-242417 + text: "Kubernetes must separate user functionality. (Manual)" + type: "manual" + remediation: | + Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. + scored: false diff --git a/docs/architecture.md b/docs/architecture.md index 18370634e..b423a71ac 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -27,4 +27,10 @@ The following table shows the valid targets based on the CIS Benchmark version. | rh-0.7 | master,node| | rh-1.0 | master, controlplane, node, etcd, policies | +The following table shows the valid DISA STIG versions + +| STIG | Targets | +|----------------------------|---------| +| eks-stig-kubernetes-v1r6 | master, controlplane, node, policies, managedservices | + diff --git a/docs/platforms.md b/docs/platforms.md index 0883eed71..8d4e3d7eb 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -2,7 +2,10 @@ ## CIS Kubernetes Benchmark support kube-bench supports running tests for Kubernetes. -Most of our supported benchmarks are defined in the [CIS Kubernetes Benchmarks](https://www.cisecurity.org/benchmark/kubernetes/). +Most of our supported benchmarks are defined in one of the following: + [CIS Kubernetes Benchmarks](https://www.cisecurity.org/benchmark/kubernetes/) + [STIG Document Library](https://public.cyber.mil/stigs/downloads) + Some defined by other hardenening guides. | Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | @@ -18,3 +21,4 @@ Some defined by other hardenening guides. 
| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | diff --git a/docs/running.md b/docs/running.md index 04af7bb50..11b37d61c 100644 --- a/docs/running.md +++ b/docs/running.md @@ -77,7 +77,7 @@ docker run --rm -v `pwd`:/host docker.io/aquasec/kube-bench:latest install ``` kube-bench cannot be run on AKS master nodes -### Running in an EKS cluster +### Running CIS benchmark in an EKS cluster There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster. The significant difference on EKS is that it's not possible to schedule jobs onto the master node, so master checks can't be performed @@ -103,6 +103,31 @@ docker push .dkr.ecr..amazonaws.com/k8s/kube-bench: 8. Retrieve the value of this Pod and output the report, note the Pod name will vary: `kubectl logs kube-bench-` - You can save the report for later reference: `kubectl logs kube-bench- > kube-bench-report.txt` +### Running DISA STIG in an EKS cluster + +There is a `job-eks-stig.yaml` file for running the kube-bench node checks on an EKS cluster. The significant difference on EKS is that it's not possible to schedule jobs onto the master node, so master checks can't be performed + +1. To create an EKS Cluster refer to [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) in the *Amazon EKS User Guide* + - Information on configuring `eksctl`, `kubectl` and the AWS CLI is within +2. Create an [Amazon Elastic Container Registry (ECR)](https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.html) repository to host the kube-bench container image +``` +aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutability MUTABLE +``` +3. Download, build and push the kube-bench container image to your ECR repo +``` +git clone https://github.com/aquasecurity/kube-bench.git +cd kube-bench +aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com +docker build -t k8s/kube-bench . +docker tag k8s/kube-bench:latest .dkr.ecr..amazonaws.com/k8s/kube-bench:latest +docker push .dkr.ecr..amazonaws.com/k8s/kube-bench:latest +``` +4. Copy the URI of your pushed image, the URI format is like this: `.dkr.ecr..amazonaws.com/k8s/kube-bench:latest` +5. Replace the `image` value in `job-eks-stig.yaml` with the URI from Step 4 +6. Run the kube-bench job on a Pod in your Cluster: `kubectl apply -f job-eks-stig.yaml` +7. Find the Pod that was created, it *should* be in the `default` namespace: `kubectl get pods --all-namespaces` +8. 
Retrieve the value of this Pod and output the report, note the Pod name will vary: `kubectl logs kube-bench-` + - You can save the report for later reference: `kubectl logs kube-bench- > kube-bench-report.txt` ### Running on OpenShift diff --git a/hack/kind-stig.test.yaml b/hack/kind-stig.test.yaml new file mode 100644 index 000000000..5051277c7 --- /dev/null +++ b/hack/kind-stig.test.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + metadata: + labels: + app: kube-bench + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:latest + command: [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-etcd + mountPath: /var/lib/etcd + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. + # You can omit this mount if you specify --version as part of the command. + - name: usr-bin + mountPath: /usr/local/mount-from-host/bin + - name: kind-bin + mountPath: /kind/bin + restartPolicy: Never + volumes: + - name: var-lib-etcd + hostPath: + path: "/var/lib/etcd" + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: usr-bin + hostPath: + path: "/usr/bin" + - name: kind-bin + hostPath: + path: "/kind/bin" diff --git a/hack/kind-stig.yaml b/hack/kind-stig.yaml new file mode 100644 index 000000000..3b1ab69d5 --- /dev/null +++ b/hack/kind-stig.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + metadata: + labels: + app: kube-bench + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:${VERSION} + command: [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-etcd + mountPath: /var/lib/etcd + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. + # You can omit this mount if you specify --version as part of the command. 
+ - name: usr-bin + mountPath: /usr/local/mount-from-host/bin + - name: kind-bin + mountPath: /kind/bin + restartPolicy: Never + volumes: + - name: var-lib-etcd + hostPath: + path: "/var/lib/etcd" + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: usr-bin + hostPath: + path: "/usr/bin" + - name: kind-bin + hostPath: + path: "/kind/bin" diff --git a/integration/testdata/Expected_output_stig.data b/integration/testdata/Expected_output_stig.data new file mode 100644 index 000000000..167dd030c --- /dev/null +++ b/integration/testdata/Expected_output_stig.data @@ -0,0 +1,266 @@ +[INFO] 1 Control Plane Components + +== Summary master == +0 checks PASS +0 checks FAIL +0 checks WARN +0 checks INFO + +[INFO] 2 Control Plane Configuration +[INFO] 2.1 DISA Category Code I +[FAIL] V-242390 The Kubernetes API server must have anonymous authentication disabled (Automated) +[FAIL] V-242400 The Kubernetes API server must have Alpha APIs disabled (Automated) +[INFO] 2.2 DISA Category Code II +[WARN] V-242381 The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual) +[WARN] V-242402 The Kubernetes API Server must have an audit log path set (Manual) +[WARN] V-242403 Kubernetes API Server must generate audit records (Manual) +[WARN] V-242461 Kubernetes API Server audit logs must be enabled. (Manual) +[WARN] V-242462 The Kubernetes API Server must be set to audit log max size. (Manual) +[WARN] V-242463 The Kubernetes API Server must be set to audit log maximum backup. (Manual) +[WARN] V-242464 The Kubernetes API Server audit log retention must be set. (Manual) +[WARN] V-242465 The Kubernetes API Server audit log path must be set. (Manual) +[WARN] V-242443 Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual) + +== Remediations controlplane == +V-242390 If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to +false. +If using executable arguments, edit the kubelet service file +$kubeletsvc on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--anonymous-auth=false +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +V-242400 Edit any manifest files or $kubeletconf that contain the feature-gates +setting with AllAlpha set to "true". +Set the flag to "false" or remove the "AllAlpha" setting +completely. Restart the kubelet service if the kubelet config file +if the kubelet config file is changed. + +V-242381 Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +V-242402 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242403 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242461 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. 
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242462 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242463 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242464 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242465 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html + +V-242443 Upgrade Kubernetes to a supported version. +Ref: https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html + + +== Summary controlplane == +0 checks PASS +2 checks FAIL +9 checks WARN +0 checks INFO + +[INFO] 3 Worker Node Security Configuration +[INFO] 3.1 DISA Category Code I +[WARN] V-242387 The Kubernetes Kubelet must have the read-only port flag disabled (Manual) +[PASS] V-242391 The Kubernetes Kubelet must have anonymous authentication disabled (Automated) +[PASS] V-242392 The Kubernetes kubelet must enable explicit authorization (Automated) +[FAIL] V-242397 The Kubernetes kubelet static PodPath must not enable static pods (Automated) +[WARN] V-242415 Secrets in Kubernetes must not be stored as environment variables.(Manual) +[FAIL] V-242434 Kubernetes Kubelet must enable kernel protection (Automated) +[PASS] V-242435 Kubernetes must prevent non-privileged users from executing privileged functions (Automated) +[FAIL] V-242393 Kubernetes Worker Nodes must not have sshd service running. (Automated) +[FAIL] V-242394 Kubernetes Worker Nodes must not have the sshd service enabled. (Automated) +[WARN] V-242395 Kubernetes dashboard must not be enabled. (Manual) +[PASS] V-242398 Kubernetes DynamicAuditing must not be enabled. (Automated) +[PASS] V-242399 Kubernetes DynamicKubeletConfig must not be enabled. (Automated) +[PASS] V-242404 Kubernetes Kubelet must deny hostname override (Automated) +[PASS] V-242406 The Kubernetes kubelet configuration file must be owned by root (Automated) +[PASS] V-242407 The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated) +[WARN] V-242414 The Kubernetes cluster must use non-privileged host ports for user pods. (Manual) +[WARN] V-242442 Kubernetes must remove old components after updated versions have been installed. (Manual) +[WARN] V-242396 Kubernetes Kubectl cp command must give expected access and results. (Manual) + +== Remediations node == +V-242387 If using a Kubelet config file, edit /var/lib/kubelet/config.yaml to set readOnlyPort to 0. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--read-only-port=0 +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +V-242397 Edit /var/lib/kubelet/config.yaml on each node to to remove the staticPodPath +Based on your system, restart the kubelet service. 
For example, +systemctl daemon-reload +systemctl restart kubelet.service + +V-242415 Run the following command: +kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A +If any of the values returned reference environment variables +rewrite application code to read secrets from mounted secret files, rather than +from environment variables. + +V-242434 If using a Kubelet config file, edit /var/lib/kubelet/config.yaml to set protectKernelDefaults: true. +If using command line arguments, edit the kubelet service file +/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and +set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. +--protect-kernel-defaults=true +Based on your system, restart the kubelet service. For example: +systemctl daemon-reload +systemctl restart kubelet.service + +V-242393 To stop the sshd service, run the command: systemctl stop sshd + +V-242394 To disable the sshd service, run the command: + chkconfig sshd off + +V-242395 Run the command: kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard +If any resources are returned, this is a finding. +Fix Text: Delete the Kubernetes dashboard deployment with the following command: + kubectl delete deployment kubernetes-dashboard --namespace=kube-system + +V-242414 For any of the pods that are using ports below 1024, +reconfigure the pod to use a service to map a host non-privileged +port to the pod port or reconfigure the image to use non-privileged ports. + +V-242442 To view all pods and the images used to create the pods, from the Master node, run the following command: + kubectl get pods --all-namespaces -o jsonpath="{..image}" | \ + tr -s '[[:space:]]' '\n' | \ + sort | \ + uniq -c + Review the images used for pods running within Kubernetes. + Remove any old pods that are using older images. + +V-242396 If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. +Upgrade the Master and Worker nodes to the latest version of kubectl. + + +== Summary node == +8 checks PASS +4 checks FAIL +6 checks WARN +0 checks INFO + +[INFO] 4 Policies +[INFO] 4.1 Policies - DISA Category Code I +[WARN] V-242381 The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual) +[WARN] V-242383 User-managed resources must be created in dedicated namespaces. (Manual) +[WARN] V-242417 Kubernetes must separate user functionality. (Manual) + +== Remediations policies == +V-242381 Create explicit service accounts wherever a Kubernetes workload requires specific access +to the Kubernetes API server. +Modify the configuration of each default service account to include this value +automountServiceAccountToken: false + +V-242383 Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. + +V-242417 Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. 
+ + +== Summary policies == +0 checks PASS +0 checks FAIL +3 checks WARN +0 checks INFO + +[INFO] 5 Managed Services +[INFO] 5.1 DISA Category Code I +[INFO] V-242386 The Kubernetes API server must have the insecure port flag disabled | Component of EKS Control Plane +[INFO] V-242388 The Kubernetes API server must have the insecure bind address not set | Component of EKS Control Plane +[WARN] V-242436 The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual) +[INFO] V-245542 Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane +[INFO] 5.2 DISA Category Code II +[INFO] V-242376 The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of EKS Control Plane +[INFO] V-242377 The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of EKS Control Plane +[INFO] V-242378 The Kubernetes API Server must use TLS 1.2, at a minimum | Component of EKS Control Plane +[INFO] V-242379 The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane +[INFO] V-242380 The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane +[INFO] V-242382 The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of EKS Control Plane +[INFO] V-242384 The Kubernetes Scheduler must have secure binding | Component of EKS Control Plane +[INFO] V-242385 The Kubernetes Controller Manager must have secure binding | Component of EKS Control Plane +[INFO] V-242389 The Kubernetes API server must have the secure port set | Component of EKS Control Plane +[INFO] V-242401 The Kubernetes API Server must have an audit policy set | Component of EKS Control Plane +[INFO] V-242402 The Kubernetes API Server must have an audit log path set | Component of EKS Control Plane +[INFO] V-242403 Kubernetes API Server must generate audit records | Component of EKS Control Plane +[INFO] V-242405 The Kubernetes manifests must be owned by root | Component of EKS Control Plane +[INFO] V-242408 The Kubernetes manifests must have least privileges | Component of EKS Control Plane +[INFO] V-242409 Kubernetes Controller Manager must disable profiling | Component of EKS Control Plane +[INFO] V-242410 The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane +[INFO] V-242411 The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane +[INFO] V-242412 The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane +[INFO] V-242413 The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane +[INFO] V-242418 The Kubernetes API server must use approved cipher suites | Component of EKS Control Plane +[INFO] V-242419 Kubernetes API Server must have the SSL Certificate Authority set | Component of EKS Control Plane +[INFO] V-242420 Kubernetes Kubelet must have the SSL Certificate Authority set | Component of EKS Control Plane +[INFO] V-242421 Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of EKS Control Plane +[INFO] V-242422 Kubernetes API Server must have a certificate for communication | Component of EKS Control Plane +[INFO] V-242423 Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane +[INFO] V-242424 
Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane +[INFO] V-242425 Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of EKS Control Plane +[INFO] V-242426 Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane +[INFO] V-242427 Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane +[INFO] V-242428 Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane +[INFO] V-242429 Kubernetes etcd must have the SSL Certificate Authority set | Component of EKS Control Plane +[INFO] V-242430 Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane +[INFO] V-242431 Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane +[INFO] V-242432 Kubernetes etcd must have peer-cert-file set for secure communication | Component of EKS Control Plane +[INFO] V-242433 Kubernetes etcd must have a peer-key-file set for secure communication | Component of EKS Control Plane +[INFO] V-242438 Kubernetes API Server must configure timeouts to limit attack surface | Component of EKS Control Plane +[INFO] V-242444 The Kubernetes component manifests must be owned by root | Component of EKS Control Plane +[INFO] V-242445 The Kubernetes component etcd must be owned by etcd | Component of EKS Control Plane +[INFO] V-242446 The Kubernetes conf files must be owned by root | Component of EKS Control Plane +[INFO] V-242447 The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242448 The Kubernetes Kube Proxy must be owned by root | Component of EKS Control Plane +[INFO] V-242449 The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242450 The Kubernetes Kubelet certificate authority must be owned by root | Component of EKS Control Plane +[INFO] V-242451 The Kubernetes component PKI must be owned by root | Component of EKS Control Plane +[INFO] V-242452 The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242453 The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane +[INFO] V-242454 The Kubernetes kubeadm.conf must be owned by root | Component of EKS Control Plane +[INFO] V-242455 The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242456 The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242457 The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane +[INFO] V-242458 The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242459 The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242460 The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242466 The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of EKS Control Plane +[INFO] V-242467 The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of EKS Control 
Plane +[INFO] V-242468 The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane +[INFO] V-245541 Kubernetes Kubelet must not disable timeouts | Component of EKS Control Plane +[INFO] V-245543 Kubernetes API Server must disable token authentication to protect information in transit | Component of EKS Control Plane +[INFO] V-245544 Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of EKS Control Plane + +== Remediations managedservices == +V-242436 Amazon EKS version 1.18 and later automatically enable ValidatingAdmissionWebhook +Ref: https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html + + +== Summary managedservices == +0 checks PASS +0 checks FAIL +1 checks WARN +62 checks INFO + +== Summary total == +8 checks PASS +6 checks FAIL +19 checks WARN +62 checks INFO + diff --git a/job-eks-stig.yaml b/job-eks-stig.yaml new file mode 100644 index 000000000..65ce5dc9a --- /dev/null +++ b/job-eks-stig.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + # image: + image: docker.io/aquasec/kube-bench:latest + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command: + [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" diff --git a/makefile b/makefile index c60705692..61b5b54e9 100644 --- a/makefile +++ b/makefile @@ -78,3 +78,15 @@ kind-run: kind-push kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \ kubectl logs job/kube-bench > ./test.data && \ diff ./test.data integration/testdata/Expected_output.data + +kind-run-stig: KUBECONFIG = "./kubeconfig.kube-bench" +kind-run-stig: kind-push + sed "s/\$${VERSION}/$(VERSION)/" ./hack/kind-stig.yaml > ./hack/kind-stig.test.yaml + kind get kubeconfig --name="$(KIND_PROFILE)" > $(KUBECONFIG) + -KUBECONFIG=$(KUBECONFIG) \ + kubectl delete job kube-bench + KUBECONFIG=$(KUBECONFIG) \ + kubectl apply -f ./hack/kind-stig.test.yaml && \ + kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \ + kubectl logs job/kube-bench > ./test.data && \ + diff ./test.data integration/testdata/Expected_output_stig.data From 07e01cf38c54583cb21f281b97a67b0b05c8dc76 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Thu, 15 Sep 2022 14:04:54 +0800 Subject: [PATCH 099/262] Support CIS Amazon Elastic Kubernetes Service (EKS) Benchmark v1.1.0 (#1222) * Support CIS Amazon Elastic Kubernetes Service (EKS) Benchmark v1.1.0 * fix yaml lint error --- cfg/config.yaml | 7 + cfg/eks-1.1.0/config.yaml | 9 + cfg/eks-1.1.0/controlplane.yaml | 14 ++ cfg/eks-1.1.0/managedservices.yaml | 154 ++++++++++++++ cfg/eks-1.1.0/master.yaml | 6 + cfg/eks-1.1.0/node.yaml | 330 +++++++++++++++++++++++++++++ cfg/eks-1.1.0/policies.yaml | 205 ++++++++++++++++++ cmd/common_test.go | 6 + cmd/util.go | 2 +- cmd/util_test.go | 2 +- 
docs/architecture.md | 1 + docs/platforms.md | 1 + job-eks-asff.yaml | 4 +- job-eks.yaml | 2 +- 14 files changed, 738 insertions(+), 5 deletions(-) create mode 100644 cfg/eks-1.1.0/config.yaml create mode 100644 cfg/eks-1.1.0/controlplane.yaml create mode 100644 cfg/eks-1.1.0/managedservices.yaml create mode 100644 cfg/eks-1.1.0/master.yaml create mode 100644 cfg/eks-1.1.0/node.yaml create mode 100644 cfg/eks-1.1.0/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index bb0e5f3ce..6d880ef3e 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -250,6 +250,7 @@ version_mapping: "1.22": "cis-1.23" "1.23": "cis-1.23" "eks-1.0.1": "eks-1.0.1" + "eks-1.1.0": "eks-1.1.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" "ocp-3.10": "rh-0.7" @@ -302,6 +303,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "eks-1.1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "rh-0.7": - "master" - "node" diff --git a/cfg/eks-1.1.0/config.yaml b/cfg/eks-1.1.0/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-1.1.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.1.0/controlplane.yaml b/cfg/eks-1.1.0/controlplane.yaml new file mode 100644 index 000000000..b432a668d --- /dev/null +++ b/cfg/eks-1.1.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.1.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Manual)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." + scored: false diff --git a/cfg/eks-1.1.0/managedservices.yaml b/cfg/eks-1.1.0/managedservices.yaml new file mode 100644 index 000000000..50575d82e --- /dev/null +++ b/cfg/eks-1.1.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.1.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. + Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. + On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. 
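The remediation above lists only the console steps for an on-demand scan. As a sketch of a CLI equivalent, assuming the AWS CLI is configured and that the repository, tag, and region placeholders below already exist:
```
# Sketch: trigger a manual ECR scan and read back the findings.
# Repository name, tag and region are placeholders.
aws ecr start-image-scan --repository-name k8s/kube-bench --image-id imageTag=latest --region us-east-1
aws ecr describe-image-scan-findings --repository-name k8s/kube-bench --image-id imageTag=latest --region us-east-1
```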
+ scored: false + + - id: 5.1.2 + text: "Minimize user access to Amazon ECR (Manual)" + type: "manual" + remediation: | + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features + are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other + AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Amazon ECR (Manual)" + type: "manual" + remediation: | + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess + the following IAM policy permissions for Amazon ECR. + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.3 + text: "AWS Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" + type: "manual" + remediation: | + This process can only be performed during Cluster Creation. + + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described + in the links within the 'References' section. + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)" + type: "manual" + remediation: | + Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation. + scored: false + + + - id: 5.6 + text: "Other Cluster Configurations" + checks: + - id: 5.6.1 + text: "Consider Fargate for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Create a Fargate profile for your cluster Before you can schedule pods running on Fargate + in your cluster, you must define a Fargate profile that specifies which pods should use + Fargate when they are launched. For more information, see AWS Fargate profile. 
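As a hedged sketch of what creating such a Fargate profile might look like with eksctl (the cluster, profile, and namespace names below are placeholders, and eksctl must already be installed and authenticated):
```
# Sketch: schedule pods from a dedicated namespace onto Fargate.
# Cluster, profile and namespace names are placeholders.
eksctl create fargateprofile --cluster my-cluster --name untrusted-workloads --namespace untrusted
```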
+ + Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has + already been created for your cluster with selectors for all pods in the kube-system + and default namespaces. Use the following procedure to create Fargate profiles for + any other namespaces you would like to use with Fargate. + scored: false diff --git a/cfg/eks-1.1.0/master.yaml b/cfg/eks-1.1.0/master.yaml new file mode 100644 index 000000000..51de3602b --- /dev/null +++ b/cfg/eks-1.1.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "eks-1.1.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/eks-1.1.0/node.yaml b/cfg/eks-1.1.0/node.yaml new file mode 100644 index 000000000..003be5892 --- /dev/null +++ b/cfg/eks-1.1.0/node.yaml @@ -0,0 +1,330 @@ +--- +controls: +version: "eks-1.1.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.11 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using Container-Optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cfg/eks-1.1.0/policies.yaml b/cfg/eks-1.1.0/policies.yaml new file mode 100644 index 000000000..e4d13a1c4 --- /dev/null +++ b/cfg/eks-1.1.0/policies.yaml @@ -0,0 +1,205 @@ +--- +controls: +version: "eks-1.1.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. 
(Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. 
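The Pod Security Policy checks above (4.2.1 to 4.2.7) each point at a field of a PSP object. As a rough, illustrative sketch only (the policy name is invented and the remaining required fields are left permissive), a PSP that would satisfy them could look like:

    apiVersion: policy/v1beta1
    kind: PodSecurityPolicy
    metadata:
      name: restricted                    # illustrative name
    spec:
      privileged: false                   # 4.2.1
      hostPID: false                      # 4.2.2
      hostIPC: false                      # 4.2.3
      hostNetwork: false                  # 4.2.4
      allowPrivilegeEscalation: false     # 4.2.5
      runAsUser:
        rule: MustRunAsNonRoot            # 4.2.6
      requiredDropCapabilities:
        - NET_RAW                         # 4.2.7
      seLinux:
        rule: RunAsAny
      supplementalGroups:
        rule: RunAsAny
      fsGroup:
        rule: RunAsAny
      volumes:
        - "*"

Since these checks are marked manual, the sketch is only meant to show which PSP fields the benchmark is asking about.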
+ scored: false + + - id: 4.3 + text: "CNI Plugin" + checks: + - id: 4.3.1 + text: "Ensure that the latest CNI version is used (Manual)" + type: "manual" + remediation: | + Review the documentation of AWS CNI plugin, and ensure latest CNI version is used. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: [] + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/common_test.go b/cmd/common_test.go index 5f22897a4..f24b80d06 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -442,6 +442,12 @@ func TestValidTargets(t *testing.T) { targets: []string{"node", "policies", "controlplane", "managedservices"}, expected: true, }, + { + name: "eks-1.1.0 valid", + benchmark: "eks-1.1.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, } for _, c := range cases { diff --git a/cmd/util.go b/cmd/util.go index 12546f538..2ab2cca2d 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -461,7 +461,7 @@ func getPlatformBenchmarkVersion(platform Platform) string { glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform) switch platform.Name { case "eks": - return "eks-1.0.1" + return "eks-1.1.0" case "gke": switch platform.Version { case "1.15", "1.16", "1.17", "1.18", "1.19": diff --git a/cmd/util_test.go b/cmd/util_test.go index 79a505afe..7ced42a21 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -582,7 +582,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { args: args{ platform: Platform{Name: "eks"}, }, - want: "eks-1.0.1", + want: "eks-1.1.0", }, { name: "gke 1.19", diff --git a/docs/architecture.md b/docs/architecture.md index b423a71ac..705ee2f1a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -22,6 +22,7 @@ The following table shows the valid targets based on the CIS Benchmark version. 
| gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | +| eks-1.1.0 | controlplane, node, policies, managedservices | | ack-1.0 | master, controlplane, node, etcd, policies, managedservices | | aks-1.0 | controlplane, node, policies, managedservices | | rh-0.7 | master,node| diff --git a/docs/platforms.md b/docs/platforms.md index 8d4e3d7eb..a6527d94b 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -17,6 +17,7 @@ Some defined by other hardenening guides. | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 5e079cc2a..64d91c59d 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -40,7 +40,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.0.1", + "eks-1.1.0", "--asff", ] env: @@ -59,7 +59,7 @@ spec: mountPath: /etc/kubernetes readOnly: true - name: kube-bench-eks-config - mountPath: "/opt/kube-bench/cfg/eks-1.0.1/config.yaml" + mountPath: "/opt/kube-bench/cfg/eks-1.1.0/config.yaml" subPath: config.yaml readOnly: true restartPolicy: Never diff --git a/job-eks.yaml b/job-eks.yaml index 5a946fa55..d9601839c 100644 --- a/job-eks.yaml +++ b/job-eks.yaml @@ -20,7 +20,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.0.1", + "eks-1.1.0", ] volumeMounts: - name: var-lib-kubelet From 4d76c77c6a9fcde493e99282575e9aebd70c6a9d Mon Sep 17 00:00:00 2001 From: TARI TARI <39155974+TARI0510@users.noreply.github.com> Date: Thu, 15 Sep 2022 19:26:15 +0800 Subject: [PATCH 100/262] feat(cis-1.6-k3s): Add support to CIS-1.6 for k3s distribution (#1261) * feat(cis-1.6-k3s): Add support to CIS-1.6 for k3s distribution * update(docs): change platforms and architectrue document; update(review): code review for cfg/cis-1.6-k3s; * update(docs): recover sheet style * fix(yaml-lint): CI/CD YAML Error * fix: Correct the problem of command and file/directory/log not found scene * fix(yaml-lint): CI/CD YAML Error --- cfg/cis-1.6-k3s/config.yaml | 42 ++ cfg/cis-1.6-k3s/controlplane.yaml | 40 ++ cfg/cis-1.6-k3s/etcd.yaml | 129 +++++ cfg/cis-1.6-k3s/master.yaml | 783 ++++++++++++++++++++++++++++++ cfg/cis-1.6-k3s/node.yaml | 253 ++++++++++ cfg/cis-1.6-k3s/policies.yaml | 260 ++++++++++ cfg/config.yaml | 7 + docs/architecture.md | 1 + docs/platforms.md | 1 + 9 files changed, 1516 insertions(+) create mode 100644 cfg/cis-1.6-k3s/config.yaml create mode 100644 cfg/cis-1.6-k3s/controlplane.yaml create mode 100644 cfg/cis-1.6-k3s/etcd.yaml create mode 100644 cfg/cis-1.6-k3s/master.yaml create mode 100644 cfg/cis-1.6-k3s/node.yaml create mode 100644 cfg/cis-1.6-k3s/policies.yaml diff --git a/cfg/cis-1.6-k3s/config.yaml b/cfg/cis-1.6-k3s/config.yaml new file mode 100644 index 000000000..afc6ade0d --- /dev/null +++ b/cfg/cis-1.6-k3s/config.yaml @@ -0,0 +1,42 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - 
scheduler + - controllermanager + - node + + scheduler: + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + + controllermanager: + kubeconfig: + - /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig + +etcd: + components: + - etcd + etcd: + confs: + - /var/lib/rancher/k3s/server/db/etcd/config + defaultconf: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - proxy + - kubelet + proxy: + kubeconfig: + - "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" + defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" + kubelet: + kubeconfig: + - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" + defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" + cafile: + - "/var/lib/rancher/k3s/server/tls/server-ca.crt" + defaultcafile: "/var/lib/rancher/k3s/server/tls/server-ca.crt" diff --git a/cfg/cis-1.6-k3s/controlplane.yaml b/cfg/cis-1.6-k3s/controlplane.yaml new file mode 100644 index 000000000..23b639cfe --- /dev/null +++ b/cfg/cis-1.6-k3s/controlplane.yaml @@ -0,0 +1,40 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-policy-file" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster and pass it to k3s. + e.g. --kube-apiserver-arg='audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log' + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Consider modification of the audit policy in use on the cluster to include these items, at a + minimum. + scored: false diff --git a/cfg/cis-1.6-k3s/etcd.yaml b/cfg/cis-1.6-k3s/etcd.yaml new file mode 100644 index 000000000..227f4f060 --- /dev/null +++ b/cfg/cis-1.6-k3s/etcd.yaml @@ -0,0 +1,129 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate if use etcd as database (Automated)" + audit: grep -E 'cert-file|key-file' $etcdconf + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Server and peer cert and key files are specified. No manual remediation needed. 
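For orientation, the etcd checks in this file all grep the K3s-generated config at $etcdconf (by default /var/lib/rancher/k3s/server/db/etcd/config, per the config.yaml above). Assuming the stock K3s certificate layout (the exact paths below are illustrative, not copied from a real cluster), the relevant fragment of that file looks roughly like:

    client-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key
      client-cert-auth: true
      trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt
    peer-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key
      client-cert-auth: true
      trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt

Checks 2.2 through 2.6 below simply assert that these cert, key and client-cert-auth fields are present and that the auto-tls options are not.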
+ scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: grep 'client-cert-auth' $etcdconf + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + client-cert-auth is set to true. No manual remediation needed. + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: grep 'auto-tls' $etcdconf + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + By default, K3s starts Etcd without this flag. It is set to false by default. + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: grep -A 5 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file' + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + By default, K3s starts Etcd with a config file found here, $etcdconf. + The config file contains peer-transport-security: which has fields that have the peer cert and peer key files. + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: grep 'client-cert-auth' $etcdconf + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Within the file, the client-cert-auth field is set. No manual remediation needed. + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: grep 'peer-auto-tls' $etcdconf + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf. + Within the file, it does not contain the peer-auto-tls field. No manual remediation needed. + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: | + if [ -f "$etcdconf" ];then + etcd_ca=$(grep 'trusted-ca-file' $etcdconf | awk -F ":|: *" '{print $NF}'); + apiserver_ca=$(journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "trusted-ca-file" | awk -F "=" '{print $NF}') + if [ "$etcd_ca" == "$apiserver_ca" ]; then + echo 'etcd_and_apiserver_have_same_ca'; + else + echo 'etcd_and_apiserver_ca_not_same1' ; + fi + else + echo 'etcd_and_apiserver_ca_not_same'; return ; + fi + tests: + test_items: + - flag: "etcd_and_apiserver_ca_not_same" + remediation: | + By default, K3s uses a config file for etcd that can be found at $etcdconf + and the trusted-ca-file parameters in it are set to unique values specific to etcd. No manual remediation needed. 
+ scored: false diff --git a/cfg/cis-1.6-k3s/master.yaml b/cfg/cis-1.6-k3s/master.yaml new file mode 100644 index 000000000..f73979be8 --- /dev/null +++ b/cfg/cis-1.6-k3s/master.yaml @@ -0,0 +1,783 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive if etcd is used (Automated)" + audit: stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the below command: + journalctl -u k3s | grep 'Managed etcd' | grep -v grep + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/rancher/k3s/server/db/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd if etcd is used (Not Applicable)" + scored: false + + - id: 1.1.13 + text: "Ensure that the admin.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. 
+ For example, + chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.kubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.kubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the cloud-controller.kubeconfig file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the $controllermanagerkubeconfig file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the k3s node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+          For example,
+          chmod -R 644 /var/lib/rancher/k3s/server/tls/*.crt
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
+        audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.key' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key
+        scored: true
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s kube-apiserver is configured to run with --anonymous-auth=false flag and value.
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "basic-auth-file"
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with basic authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with basic authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-https argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-https"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          By default, K3s kube-apiserver doesn't run with the --kubelet-https parameter as it runs with TLS. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'kubelet-client-certificate|kubelet-client-key'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          By default, K3s kube-apiserver is run with these arguments for secure communication with kubelet. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-certificate-authority"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          By default, K3s kube-apiserver is run with this argument for secure communication with kubelet. No manual remediation is needed.
+ scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + No manual remediation needed. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument. + To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. 
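For 1.2.10 above, the "configuration file" the remediation refers to is an upstream Kubernetes admission configuration. A minimal sketch follows; the file paths and the qps/burst numbers are illustrative, and the schema should be double-checked against the Kubernetes documentation for your version:

    # admission control config, passed via --kube-apiserver-arg='admission-control-config-file=...'
    apiVersion: apiserver.config.k8s.io/v1
    kind: AdmissionConfiguration
    plugins:
      - name: EventRateLimit
        path: /var/lib/rancher/k3s/server/eventconfig.yaml   # illustrative path
    ---
    # contents of the referenced eventconfig.yaml
    apiVersion: eventratelimit.admission.k8s.io/v1alpha1
    kind: Configuration
    limits:
      - type: Server
        qps: 50
        burst: 100

EventRateLimit also has to appear in --enable-admission-plugins, which is what the check's test looks for.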
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + K3s would need to have the SecurityContextDeny admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=SecurityContextDeny + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "ServiceAccount" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not use this argument. + If there's a desire to use this argument, follow the documentation and create ServiceAccount objects as per your environment. + Then refer to K3s's documentation to see how to supply additional api server configuration via the kube-apiserver-arg parameter. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "disable-admission-plugins" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not use this argument. No manual remediation needed. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin PodSecurityPolicy is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + K3s would need to have the PodSecurityPolicy admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=PodSecurityPolicy. + scored: true + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + K3s would need to have the NodeRestriction admission plugin enabled by passing it as an argument to K3s. + --kube-apiserver-arg='enable-admission-plugins=NodeRestriction. + scored: true + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + By default, K3s explicitly excludes the use of the --insecure-bind-address parameter. No manual remediation is needed. 
+ scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "insecure-port" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + remediation: | + By default, K3s starts the kube-apiserver process with this argument's parameter set to 0. No manual remediation is needed. + scored: true + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "secure-port" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + By default, K3s sets the parameter of 6444 for the --secure-port argument. No manual remediation is needed. + scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-path" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-path=/path/to/log/file' + scored: true + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxage" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxage=30' + scored: true + + - id: 1.2.24 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxbackup" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxbackup=10' + scored: true + + - id: 1.2.25 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxsize" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxsize=100' + scored: true + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "request-timeout" + tests: + test_items: + - flag: "--request-timeout" + compare: + op: lte + value: 60 + remediation: | + By default, K3s does not set the --request-timeout argument. No manual remediation needed. 
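The audit-log checks above (1.2.22 to 1.2.25) each expect an extra kube-apiserver argument. Besides repeating --kube-apiserver-arg on the K3s command line as the remediations show, the same settings can be collected in one place; this is a sketch assuming the usual /etc/rancher/k3s/config.yaml mechanism, which is not part of this patch:

    # /etc/rancher/k3s/config.yaml (illustrative)
    kube-apiserver-arg:
      - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit-log"
      - "audit-log-maxage=30"
      - "audit-log-maxbackup=10"
      - "audit-log-maxsize=100"

The audit-log-path value mirrors the example already used in controlplane.yaml check 3.2.1.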
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-lookup"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          K3s server needs to be run with the following argument, --kube-apiserver-arg='service-account-lookup=true'
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-key-file"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          By default, K3s sets the --service-account-key-file explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-certfile|etcd-keyfile'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          By default, K3s sets the --etcd-certfile and --etcd-keyfile arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          By default, K3s sets the --client-ca-file argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-cafile'
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          By default, K3s sets the --etcd-cafile argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E "encryption-provider-config"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          K3s server needs to be run with the following, --kube-apiserver-arg='encryption-provider-config=/path/to/encryption_config'.
+          This can be done by running k3s with the --secrets-encryption argument which will configure the encryption provider.
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: manual
+        remediation: |
+          K3s server needs to be run with the following, --secrets-encryption=true, and verify that one of the allowed encryption providers is present.
+          Run the below command on the master node.
+ grep aescbc /path/to/encryption-config.json + Verify that aescbc/kms/secretbox is set as the encryption provider for all the desired resources. + scored: true + + - id: 1.2.35 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "tls-cipher-suites" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + remediation: | + By default, K3s explicitly doesn't set this flag. No manual remediation needed. + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: | + journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "terminated-pod-gc-threshold" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + K3s server needs to be run with the following, --kube-controller-manager-arg='terminated-pod-gc-threshold=10. + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "use-service-account-credentials" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + K3s server needs to be run with the following, --kube-controller-manager-arg='use-service-account-credentials=true' + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "service-account-private-key-file" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + By default, K3s sets the --service-account-private-key-file argument with the service account key file. No manual remediation needed. + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "root-ca-file" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + By default, K3s sets the --root-ca-file argument with the root ca file. No manual remediation needed. 
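Looking back at 1.2.33 and 1.2.34, the encryption-provider-config file they reference is a standard Kubernetes EncryptionConfiguration; the 1.2.33 remediation notes that starting K3s with --secrets-encryption configures the provider for you. A minimal aescbc sketch, with a placeholder key name and secret:

    apiVersion: apiserver.config.k8s.io/v1
    kind: EncryptionConfiguration
    resources:
      - resources:
          - secrets
        providers:
          - aescbc:
              keys:
                - name: key1                            # placeholder
                  secret: <base64-encoded 32-byte key>  # placeholder, generate your own
          - identity: {}

The manual verification in 1.2.34 then simply greps this file for one of the allowed provider names (aescbc, kms or secretbox).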
+ scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "RotateKubeletServerCertificate" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + By default, K3s implements its own logic for certificate generation and rotation. + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "bind-address" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-scheduler" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "bind-address" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed. + scored: true diff --git a/cfg/cis-1.6-k3s/node.yaml b/cfg/cis-1.6-k3s/node.yaml new file mode 100644 index 000000000..1557b5a57 --- /dev/null +++ b/cfg/cis-1.6-k3s/node.yaml @@ -0,0 +1,253 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 4.1.3 + text: "If proxy kubeproxy.kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated)" + audit: stat -c %a $proxykubeconfig + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $proxykubeconfig + scored: true + + - id: 4.1.4 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, chown root:root $proxykubeconfig + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, K3s creates $kubeletkubeconfig with 644 permissions. No manual remediation needed. + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + By default, K3s creates $kubeletkubeconfig with root:root ownership. No manual remediation needed. + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" + audit: stat -c permissions=%a $kubeletcafile + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + By default, K3s creates $kubeletcafile with 644 permissions. + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: stat -c %U:%G $kubeletcafile + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + By default, K3s creates $kubeletcafile with root:root ownership. + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Not Applicable)" + scored: false + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Not Applicable)" + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the anonymous-auth argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s starts kubelet with --anonymous-auth set to false. No manual remediation needed. + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" + tests: + test_items: + - flag: --authorization-mode + compare: + op: nothave + value: AlwaysAllow + remediation: | + K3s starts kubelet with Webhook as the value for the --authorization-mode argument. No manual remediation needed. + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" + tests: + test_items: + - flag: --client-ca-file + remediation: | + By default, K3s starts the kubelet process with the --client-ca-file. No manual remediation needed. 
+ scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "read-only-port" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + compare: + op: eq + value: 0 + - flag: "--read-only-port" + set: false + remediation: | + By default, K3s starts the kubelet process with the --read-only-port argument set to 0. + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "streaming-connection-idle-timeout" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + set: false + bin_op: or + remediation: | + By default, K3s does not set --streaming-connection-idle-timeout when starting kubelet. + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "protect-kernel-defaults" + tests: + test_items: + - flag: --protect-kernel-defaults + compare: + op: eq + value: true + remediation: | + K3s server needs to be started with the following, --protect-kernel-defaults=true. + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "make-iptables-util-chains" + tests: + test_items: + - flag: --make-iptables-util-chains + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + set: false + bin_op: or + remediation: | + K3s server needs to be run with the following, --kube-apiserver-arg='make-iptables-util-chains=true'. + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Not Applicable)" + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: /bin/ps -fC containerd + tests: + test_items: + - flag: --event-qps + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file' + tests: + test_items: + - flag: --tls-cert-file + - flag: --tls-private-key-file + remediation: | + By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments when executing the kubelet process. 
+ scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Not Applicable)" + scored: false + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Not Applicable)" + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Applicable)" + scored: false diff --git a/cfg/cis-1.6-k3s/policies.yaml b/cfg/cis-1.6-k3s/policies.yaml new file mode 100644 index 000000000..f6658cef3 --- /dev/null +++ b/cfg/cis-1.6-k3s/policies.yaml @@ -0,0 +1,260 @@ +--- +controls: +version: "cis-1.6-k3s" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + kubectl get roles --all-namespaces -o yaml + kubectl get clusterroles -o yaml + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + kubectl describe psp | grep MustRunAsNonRoot + An operator should apply a PodSecurityPolicy that sets the Rule value to MustRunAsNonRoot. An example of this can be found in the Hardening Guide + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the hostPID value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. 
+ https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the HostIPC value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the HostNetwork value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the allowPrivilegeEscalation value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.6 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + An operator should apply a PodSecurityPolicy that sets the runAsUser.Rule value to MustRunAsNonRoot. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + kubectl get psp -o json | jq .spec.requiredDropCapabilities[] + An operator should apply a PodSecurityPolicy that sets .spec.requiredDropCapabilities[] to a value of All. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + kubectl get psp + An operator should apply a PodSecurityPolicy that sets allowedCapabilities to anything other than an empty array. An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + kubectl get psp + An operator should apply a PodSecurityPolicy that sets requiredDropCapabilities to ALL. 
An example of this can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + By default, K3s use Canal (Calico and Flannel) and fully supports network policies. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Run the below command on the master node. + for i in kube-system kube-public default; do + kubectl get networkpolicies -n $i; + done + Verify that there are network policies applied to each of the namespaces. + An operator should apply NetworkPolcyies that prevent unneeded traffic from traversing networks unnecessarily. An example of applying a NetworkPolcy can be found in the Hardening Guide. + https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + Run the following command to find references to objects which use environment variables defined from secrets. + kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A + + if possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + audit: kubectl get namespaces + type: "manual" + remediation: | + Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature- + gates=AllAlpha=true" argument. + Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. 
An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 5.7.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Run the below command on the master node. + kubectl get all -n default + The only entries there should be system-managed resources such as the kubernetes service. + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 6d880ef3e..93ae5147c 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -258,6 +258,7 @@ version_mapping: "ocp-4.0": "rh-1.0" "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" + "cis-1.6-k3s": "cis-1.6-k3s" target_mapping: "cis-1.5": @@ -272,6 +273,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.6-k3s": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "cis-1.20": - "master" - "node" diff --git a/docs/architecture.md b/docs/architecture.md index 705ee2f1a..101c5e776 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -27,6 +27,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | aks-1.0 | controlplane, node, policies, managedservices | | rh-0.7 | master,node| | rh-1.0 | master, controlplane, node, etcd, policies | +| cis-1.6-k3s | master, controlplane, node, etcd, policies | The following table shows the valid DISA STIG versions diff --git a/docs/platforms.md b/docs/platforms.md index a6527d94b..98cde6231 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -22,4 +22,5 @@ Some defined by other hardenening guides. | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | From 818d57d62061d90a36866ce433fda91d5aa74cd8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 25 Sep 2022 12:55:05 +0300 Subject: [PATCH 101/262] build(deps): bump gorm.io/gorm from 1.23.8 to 1.23.9 (#1284) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.23.8 to 1.23.9. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.23.8...v1.23.9) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
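The cis-1.6-k3s entries added to version_mapping and target_mapping above make the new benchmark selectable by name at run time. A minimal usage sketch, assuming a kube-bench binary built from this tree and the --benchmark override flag behaving as in released versions:

    # Run every target registered for the K3s benchmark, bypassing version auto-detection.
    ./kube-bench run --targets master,controlplane,etcd,node,policies --benchmark cis-1.6-k3s

    # Or run only the worker-node checks from cfg/cis-1.6-k3s/node.yaml.
    ./kube-bench run --targets node --benchmark cis-1.6-k3s

The comma-separated list mirrors the targets registered for cis-1.6-k3s in target_mapping.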
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 548cb1e5b..561b38f65 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,6 @@ require ( github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.8 - gorm.io/gorm v1.23.8 + gorm.io/gorm v1.23.9 k8s.io/client-go v0.24.3 ) diff --git a/go.sum b/go.sum index edcbf4427..4b9769c47 100644 --- a/go.sum +++ b/go.sum @@ -1076,8 +1076,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.8 h1:8bEphSAB69t3odsCR4NDzt581iZEWQuRM27Cg6KgfPY= gorm.io/driver/postgres v1.3.8/go.mod h1:qB98Aj6AhRO/oyu/jmZsi/YM9g6UzVCjMxO/6frFvcA= gorm.io/gorm v1.23.6/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE= -gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/gorm v1.23.9 h1:NSHG021i+MCznokeXR3udGaNyFyBQJW8MbjrJMVCfGw= +gorm.io/gorm v1.23.9/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From df687cbe0d52c4ffc6a79216f08ed5d0cce7441a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Sep 2022 10:51:06 +0300 Subject: [PATCH 102/262] build(deps): bump gorm.io/driver/postgres from 1.3.8 to 1.3.10 (#1287) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.8 to 1.3.10. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.8...v1.3.10) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 561b38f65..9ea15a534 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/spf13/viper v1.12.0 github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.8 + gorm.io/driver/postgres v1.3.10 gorm.io/gorm v1.23.9 k8s.io/client-go v0.24.3 ) diff --git a/go.sum b/go.sum index 4b9769c47..f91abe7db 100644 --- a/go.sum +++ b/go.sum @@ -308,8 +308,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8= -github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= +github.com/jackc/pgconn v1.13.0/go.mod h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -326,26 +326,26 @@ github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvW github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= -github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= +github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= -github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= +github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod 
h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y= -github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ= +github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= +github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas= @@ -575,11 +575,11 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1073,9 +1073,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.8 h1:8bEphSAB69t3odsCR4NDzt581iZEWQuRM27Cg6KgfPY= -gorm.io/driver/postgres v1.3.8/go.mod h1:qB98Aj6AhRO/oyu/jmZsi/YM9g6UzVCjMxO/6frFvcA= -gorm.io/gorm v1.23.6/go.mod 
h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= +gorm.io/driver/postgres v1.3.10 h1:Fsd+pQpFMGlGxxVMUPJhNo8gG8B1lKtk8QQ4/VZZAJw= +gorm.io/driver/postgres v1.3.10/go.mod h1:whNfh5WhhHs96honoLjBAMwJGYEuA3m1hvgUbNXhPCw= +gorm.io/gorm v1.23.7/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= gorm.io/gorm v1.23.9 h1:NSHG021i+MCznokeXR3udGaNyFyBQJW8MbjrJMVCfGw= gorm.io/gorm v1.23.9/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From dd39b19ffcd26d27b790916e844e3abf87b4a970 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Sep 2022 20:56:43 +0300 Subject: [PATCH 103/262] build(deps): bump github.com/spf13/viper from 1.12.0 to 1.13.0 (#1273) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.12.0 to 1.13.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.12.0...v1.13.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 9ea15a534..27e29d777 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.5.0 - github.com/spf13/viper v1.12.0 + github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.10 diff --git a/go.sum b/go.sum index f91abe7db..d578e733f 100644 --- a/go.sum +++ b/go.sum @@ -448,8 +448,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -509,8 +509,8 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= -github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= +github.com/spf13/viper v1.13.0/go.mod 
h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -524,10 +524,11 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= -github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1054,8 +1055,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= -gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1070,7 +1071,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.10 h1:Fsd+pQpFMGlGxxVMUPJhNo8gG8B1lKtk8QQ4/VZZAJw= From a1e2870e83e9c2dfe0d24210712e69f8adeb6743 Mon Sep 17 00:00:00 2001 From: j-k Date: Mon, 3 Oct 2022 06:52:06 +0100 Subject: [PATCH 104/262] Migrate to aws-sdk-go-v2 (#1268) * Migrate to aws-sdk-go-v2 * Update dependencies Minimum go version increased 
due to k8s.io/client-go --- .github/workflows/build.yml | 10 +- .github/workflows/release.yml | 4 +- check/controls.go | 38 +- check/controls_test.go | 34 +- cmd/securityHub.go | 19 +- go.mod | 56 ++- go.sum | 518 +++------------------------- internal/findings/publisher.go | 40 +-- internal/findings/publisher_test.go | 68 ---- 9 files changed, 169 insertions(+), 618 deletions(-) delete mode 100644 internal/findings/publisher_test.go diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ea70425bd..d668fd442 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ on: - "LICENSE" - "NOTICE" env: - GO_VERSION: "1.16" + GO_VERSION: "1.19" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" @@ -26,7 +26,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code uses: actions/checkout@v3 - name: yaml-lint @@ -43,7 +43,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code uses: actions/checkout@v3 - name: Run unit tests @@ -59,7 +59,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code uses: actions/checkout@v3 - name: Setup Kubernetes cluster (KIND) @@ -89,7 +89,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code uses: actions/checkout@v3 with: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 996930c83..97a4dd1bd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,7 +5,7 @@ on: tags: - "v*" env: - GO_VERSION: "1.16" + GO_VERSION: "1.19" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" @@ -17,7 +17,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v3 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code uses: actions/checkout@v3 with: diff --git a/check/controls.go b/check/controls.go index 95c287e2e..15db8312b 100644 --- a/check/controls.go +++ b/check/controls.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/golang/glog" "github.com/onsi/ginkgo/reporters" "github.com/spf13/viper" @@ -206,8 +206,8 @@ func (controls *Controls) JUnit() ([]byte, error) { } // ASFF encodes the results of last run to AWS Security Finding Format(ASFF). 
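For orientation on the ASFF() change that follows: with aws-sdk-go-v2 the finding is assembled from value types in the securityhub types package (plain string slices and maps, an int32 confidence, typed severity labels) rather than the pointer-heavy v1 structs. A minimal sketch of the v2 shape, limited to the fields this hunk touches; the account ID and cluster name are placeholder values, not taken from the code:

    // Sketch: one finding built the way the v2 SDK expects it.
    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/securityhub/types"
    )

    func main() {
        f := types.AwsSecurityFinding{
            AwsAccountId:  aws.String("111122223333"),                       // still a *string in v2
            Confidence:    100,                                              // plain int32, no aws.Int64 wrapper
            Types:         []string{"Software and Configuration Checks"},    // []string, not []*string
            Severity:      &types.Severity{Label: types.SeverityLabelHigh},  // typed enum constant
            ProductFields: map[string]string{"Reason": "failed"},            // values, not string pointers
            Resources: []types.Resource{
                {Id: aws.String("example-cluster"), Type: aws.String("Container")},
            },
        }
        fmt.Println(*f.AwsAccountId, f.Confidence, f.Severity.Label)
    }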
-func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { - fs := []*securityhub.AwsSecurityFinding{} +func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) { + fs := []types.AwsSecurityFinding{} account, err := getConfig("AWS_ACCOUNT") if err != nil { return nil, err @@ -250,9 +250,9 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { id = aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s+%s", arn, account, check.ID, cluster, nodeName)) } - f := securityhub.AwsSecurityFinding{ + f := types.AwsSecurityFinding{ AwsAccountId: aws.String(account), - Confidence: aws.Int64(100), + Confidence: *aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), Id: id, CreatedAt: aws.String(tf), @@ -261,30 +261,30 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { SchemaVersion: aws.String(SCHEMA), Title: aws.String(fmt.Sprintf("%s %s", check.ID, check.Text)), UpdatedAt: aws.String(tf), - Types: []*string{aws.String(TYPE)}, - Severity: &securityhub.Severity{ - Label: aws.String(securityhub.SeverityLabelHigh), + Types: []string{*aws.String(TYPE)}, + Severity: &types.Severity{ + Label: types.SeverityLabelHigh, }, - Remediation: &securityhub.Remediation{ - Recommendation: &securityhub.Recommendation{ + Remediation: &types.Remediation{ + Recommendation: &types.Recommendation{ Text: aws.String(remediation), }, }, - ProductFields: map[string]*string{ - "Reason": aws.String(reason), - "Actual result": aws.String(actualValue), - "Expected result": aws.String(check.ExpectedResult), - "Section": aws.String(fmt.Sprintf("%s %s", controls.ID, controls.Text)), - "Subsection": aws.String(fmt.Sprintf("%s %s", g.ID, g.Text)), + ProductFields: map[string]string{ + "Reason": reason, + "Actual result": actualValue, + "Expected result": check.ExpectedResult, + "Section": fmt.Sprintf("%s %s", controls.ID, controls.Text), + "Subsection": fmt.Sprintf("%s %s", g.ID, g.Text), }, - Resources: []*securityhub.Resource{ + Resources: []types.Resource{ { Id: aws.String(cluster), Type: aws.String(TYPE), }, }, } - fs = append(fs, &f) + fs = append(fs, f) } } } diff --git a/check/controls_test.go b/check/controls_test.go index a0015facb..5393eef21 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -25,8 +25,8 @@ import ( "reflect" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/onsi/ginkgo/reporters" "github.com/spf13/viper" "github.com/stretchr/testify/assert" @@ -374,7 +374,7 @@ func TestControls_ASFF(t *testing.T) { tests := []struct { name string fields fields - want []*securityhub.AwsSecurityFinding + want []types.AwsSecurityFinding wantErr bool }{ { @@ -405,32 +405,32 @@ func TestControls_ASFF(t *testing.T) { }, }, }}, - want: []*securityhub.AwsSecurityFinding{ + want: []types.AwsSecurityFinding{ { AwsAccountId: aws.String("foo account"), - Confidence: aws.Int64(100), + Confidence: *aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", fmt.Sprintf(ARN, "somewhere"), "1", "check1id")), Description: aws.String("check1text"), ProductArn: aws.String(fmt.Sprintf(ARN, "somewhere")), SchemaVersion: aws.String(SCHEMA), Title: aws.String(fmt.Sprintf("%s %s", "check1id", "check1text")), - Types: []*string{aws.String(TYPE)}, - Severity: &securityhub.Severity{ - Label: 
aws.String(securityhub.SeverityLabelHigh), + Types: []string{*aws.String(TYPE)}, + Severity: &types.Severity{ + Label: types.SeverityLabelHigh, }, - Remediation: &securityhub.Remediation{ - Recommendation: &securityhub.Recommendation{ + Remediation: &types.Remediation{ + Recommendation: &types.Recommendation{ Text: aws.String("fix me"), }, }, - ProductFields: map[string]*string{ - "Reason": aws.String("failed"), - "Actual result": aws.String("failed"), - "Expected result": aws.String("failed"), - "Section": aws.String(fmt.Sprintf("%s %s", "test1", "test runnner")), - "Subsection": aws.String(fmt.Sprintf("%s %s", "g1", "Group text")), + ProductFields: map[string]string{ + "Reason": "failed", + "Actual result": "failed", + "Expected result": "failed", + "Section": fmt.Sprintf("%s %s", "test1", "test runnner"), + "Subsection": fmt.Sprintf("%s %s", "g1", "Group text"), }, - Resources: []*securityhub.Resource{ + Resources: []types.Resource{ { Id: aws.String("foo Cluster"), Type: aws.String(TYPE), diff --git a/cmd/securityHub.go b/cmd/securityHub.go index 56d6811f9..2c9d0a953 100644 --- a/cmd/securityHub.go +++ b/cmd/securityHub.go @@ -1,33 +1,32 @@ package cmd import ( + "context" "fmt" "log" "github.com/aquasecurity/kube-bench/internal/findings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/securityhub" + "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/spf13/viper" ) // REGION ... const REGION = "AWS_REGION" -func writeFinding(in []*securityhub.AwsSecurityFinding) error { +func writeFinding(in []types.AwsSecurityFinding) error { r := viper.GetString(REGION) if len(r) == 0 { return fmt.Errorf("%s not set", REGION) } - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(r), - }, - ) + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(r)) if err != nil { return err } - svc := securityhub.New(sess) - p := findings.New(svc) + + svc := securityhub.NewFromConfig(cfg) + p := findings.New(*svc) out, perr := p.PublishFinding(in) print(out) return perr diff --git a/go.mod b/go.mod index 27e29d777..2f49b2425 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,13 @@ module github.com/aquasecurity/kube-bench -go 1.16 +go 1.19 require ( - github.com/aws/aws-sdk-go v1.44.91 + github.com/aws/aws-sdk-go-v2 v1.16.16 + github.com/aws/aws-sdk-go-v2/config v1.17.8 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 github.com/fatih/color v1.13.0 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/glog v1.0.0 github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 @@ -14,6 +16,50 @@ require ( github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.3.10 - gorm.io/gorm v1.23.9 - k8s.io/client-go v0.24.3 + gorm.io/gorm v1.23.10 + k8s.io/client-go v0.25.2 +) + +require ( + github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc 
v1.13.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect + github.com/aws/smithy-go v1.13.3 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgconn v1.13.0 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.3.1 // indirect + github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect + github.com/jackc/pgtype v1.12.0 // indirect + github.com/jackc/pgx/v4 v4.17.2 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/afero v1.9.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be // indirect + golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect + golang.org/x/text v0.3.7 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index d578e733f..3e85c49b6 100644 --- a/go.sum +++ b/go.sum @@ -17,32 +17,14 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod 
h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -54,132 +36,77 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
-github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.91 h1:SRWmuX7PTyhBdLuvSfM7KWrWISJsrRsUPcFDSFduRxY= -github.com/aws/aws-sdk-go v1.44.91/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= +github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= +github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE= +github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= +github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM= +github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 h1:jA6VOKxMvwEZSbUmidVkubHxEd5/CllfpdUSPQ7wwv4= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5/go.mod 
h1:2nUG9O81bApzrD7ixZ//VGGXXwooek/N+EQqeXb9208= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= +github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA= +github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod 
h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -187,8 +114,6 @@ github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -203,14 +128,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -219,19 +138,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= 
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -242,61 +153,21 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= -github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog 
v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ -314,7 +185,6 @@ github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgmock 
v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= @@ -348,159 +218,71 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.4 h1:tHnRBy1i5F2Dh8BAFxqFzxKqqvezXrL2OW1TnX+Mlas= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 
h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1/go.mod 
h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= -github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= @@ -511,7 +293,6 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -521,66 +302,49 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod 
h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.4/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.4/go.mod h1:Ud+VUwIi9/uQHOMA+4ekToJ12lTxlv0zB/+DHwTGEbU= -go.etcd.io/etcd/client/v3 v3.5.4/go.mod h1:ZaRkVgBZC+L+dLCjTcF1hRXpgZXQPOvnA/Ak/gq3kiY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be h1:fmw3UbQh+nxngCAHrDCCztao/kbYFnWjoqop8dHx05A= +golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -604,7 +368,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -615,11 +378,9 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -627,13 +388,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -651,23 +409,10 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod 
h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y= -golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -677,17 +422,6 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -698,13 +432,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -718,20 +448,15 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -741,10 +466,7 @@ 
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -753,57 +475,29 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time 
v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -819,7 +513,6 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -844,11 +537,9 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -858,23 +549,14 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -894,26 +576,6 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -944,7 +606,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -952,54 +613,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto 
v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto 
v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1013,24 +632,9 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1041,43 +645,28 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.3.10 h1:Fsd+pQpFMGlGxxVMUPJhNo8gG8B1lKtk8QQ4/VZZAJw= gorm.io/driver/postgres v1.3.10/go.mod h1:whNfh5WhhHs96honoLjBAMwJGYEuA3m1hvgUbNXhPCw= gorm.io/gorm v1.23.7/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.9 h1:NSHG021i+MCznokeXR3udGaNyFyBQJW8MbjrJMVCfGw= -gorm.io/gorm v1.23.9/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/gorm v1.23.10 h1:4Ne9ZbzID9GUxRkllxN4WjJKpsHx8YbKvekVdgyWh24= +gorm.io/gorm v1.23.10/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1085,21 +674,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod 
h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI= -k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY= -k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= +k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/findings/publisher.go b/internal/findings/publisher.go index 4661cc254..1847a77fe 100644 --- a/internal/findings/publisher.go +++ b/internal/findings/publisher.go @@ -1,14 +1,16 @@ package findings import ( - "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/aws/aws-sdk-go/service/securityhub/securityhubiface" + "context" + + "github.com/aws/aws-sdk-go-v2/service/securityhub" + "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/pkg/errors" ) // A Publisher represents an object that publishes finds to AWS Security Hub. type Publisher struct { - client securityhubiface.SecurityHubAPI // AWS Security Hub Service Client + client securityhub.Client // AWS Security Hub Service Client } // A PublisherOutput represents an object that contains information about the service call. @@ -16,26 +18,26 @@ type PublisherOutput struct { // The number of findings that failed to import. // // FailedCount is a required field - FailedCount int64 + FailedCount int32 // The list of findings that failed to import. - FailedFindings []*securityhub.ImportFindingsError + FailedFindings []types.ImportFindingsError // The number of findings that were successfully imported. // // SuccessCount is a required field - SuccessCount int64 + SuccessCount int32 } // New creates a new Publisher. 
-func New(client securityhubiface.SecurityHubAPI) *Publisher { +func New(client securityhub.Client) *Publisher { return &Publisher{ client: client, } } // PublishFinding publishes findings to AWS Security Hub Service -func (p *Publisher) PublishFinding(finding []*securityhub.AwsSecurityFinding) (*PublisherOutput, error) { +func (p *Publisher) PublishFinding(finding []types.AwsSecurityFinding) (*PublisherOutput, error) { o := PublisherOutput{} i := securityhub.BatchImportFindingsInput{} i.Findings = finding @@ -45,24 +47,20 @@ func (p *Publisher) PublishFinding(finding []*securityhub.AwsSecurityFinding) (* batch := 100 for i := 0; i < len(finding); i += batch { - j := i + batch - if j > len(finding) { - j = len(finding) - } i := securityhub.BatchImportFindingsInput{} i.Findings = finding - r, err := p.client.BatchImportFindings(&i) // Process the batch. + r, err := p.client.BatchImportFindings(context.Background(), &i) // Process the batch. if err != nil { errs = errors.Wrap(err, "finding publish failed") } - if r.FailedCount != nil { - o.FailedCount += *r.FailedCount - } - if r.SuccessCount != nil { - o.SuccessCount += *r.SuccessCount - } - for _, ff := range r.FailedFindings { - o.FailedFindings = append(o.FailedFindings, ff) + if r != nil { + if r.FailedCount != 0 { + o.FailedCount += r.FailedCount + } + if r.SuccessCount != 0 { + o.SuccessCount += r.SuccessCount + } + o.FailedFindings = append(o.FailedFindings, r.FailedFindings...) } } return &o, errs diff --git a/internal/findings/publisher_test.go b/internal/findings/publisher_test.go deleted file mode 100644 index 2a5381c2a..000000000 --- a/internal/findings/publisher_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package findings - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/aws/aws-sdk-go/service/securityhub/securityhubiface" -) - -// Define a mock struct to be used in your unit tests of myFunc. 
-type MockSHClient struct { - securityhubiface.SecurityHubAPI - Batches int - NumberOfFinding int -} - -func NewMockSHClient() *MockSHClient { - return &MockSHClient{} -} - -func (m *MockSHClient) BatchImportFindings(input *securityhub.BatchImportFindingsInput) (*securityhub.BatchImportFindingsOutput, error) { - o := securityhub.BatchImportFindingsOutput{} - m.Batches++ - m.NumberOfFinding = len(input.Findings) - return &o, nil -} - -func TestPublisher_publishFinding(t *testing.T) { - type fields struct { - client *MockSHClient - } - type args struct { - finding []*securityhub.AwsSecurityFinding - } - tests := []struct { - name string - fields fields - args args - wantBatchCount int - wantFindingCount int - }{ - {"Test single finding", fields{NewMockSHClient()}, args{makeFindings(1)}, 1, 1}, - {"Test 150 finding should return 2 batches", fields{NewMockSHClient()}, args{makeFindings(150)}, 2, 150}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := New(tt.fields.client) - p.PublishFinding(tt.args.finding) - if tt.fields.client.NumberOfFinding != tt.wantFindingCount { - t.Errorf("Publisher.publishFinding() want = %v, got %v", tt.wantFindingCount, tt.fields.client.NumberOfFinding) - } - if tt.fields.client.Batches != tt.wantBatchCount { - t.Errorf("Publisher.publishFinding() want = %v, got %v", tt.wantBatchCount, tt.fields.client.Batches) - } - }) - } -} - -func makeFindings(count int) []*securityhub.AwsSecurityFinding { - var findings []*securityhub.AwsSecurityFinding - - for i := 0; i < count; i++ { - t := securityhub.AwsSecurityFinding{} - findings = append(findings, &t) - - } - return findings -} From 150c40b5b19b5b64b937a19a229d8f64a58d2afc Mon Sep 17 00:00:00 2001 From: Joe Bowbeer Date: Sun, 9 Oct 2022 12:37:50 -0700 Subject: [PATCH 105/262] docs: Improve Automated/Manual documentation (#1298) * docs: improve Automated/Manual documentation * sic --- docs/flags-and-commands.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/flags-and-commands.md b/docs/flags-and-commands.md index d00d9ab40..69a98886f 100644 --- a/docs/flags-and-commands.md +++ b/docs/flags-and-commands.md @@ -112,7 +112,8 @@ There are four output states: - [INFO] is informational output that needs no further action. Note: -- If the test is Manual, this always generates WARN (because the user has to run it manually) +- Some tests with `Automated` in their description must still be run manually +- If the user has to run a test manually, this always generates WARN - If the test is Scored, and kube-bench was unable to run the test, this generates FAIL (because the test has not been passed, and as a Scored test, if it doesn't pass then it must be considered a failure). - If the test is Not Scored, and kube-bench was unable to run the test, this generates WARN. - If the test is Scored, type is empty, and there are no `test_items` present, it generates a WARN. This is to highlight tests that appear to be incompletely defined. 
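The documentation change above spells out when kube-bench reports WARN for checks that need a manual step. As a minimal sketch (the id, text and remediation below are invented placeholders, not taken from any shipped benchmark file), a manual check in a cfg/<benchmark>/*.yaml file looks roughly like this:

    - id: 9.9.9
      text: "Review audit log retention with the cluster owner (Manual)"
      type: "manual"
      remediation: |
        Confirm with the cluster owner that audit logs are retained for
        the period required by your policy.
      scored: false

Because the check is declared type: manual and carries no test_items, kube-bench cannot evaluate it automatically and reports it as [WARN], pointing the operator at the remediation text, which is the behavior the note above describes.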
From 377a05f8721348fc227d43528e104b1f524228d8 Mon Sep 17 00:00:00 2001 From: Joe Bowbeer Date: Mon, 10 Oct 2022 12:31:36 -0700 Subject: [PATCH 106/262] docs: document --unscored flag (#1297) --- docs/flags-and-commands.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/flags-and-commands.md b/docs/flags-and-commands.md index 69a98886f..1d6605bce 100644 --- a/docs/flags-and-commands.md +++ b/docs/flags-and-commands.md @@ -29,6 +29,7 @@ Flag | Description --skip string | List of comma separated values of checks to be skipped --stderrthreshold severity | logs at or above this threshold go to stderr (default 2) -v, --v Level | log level for V logs (default 0) +--unscored | Run the unscored CIS checks (default true) --version string | Manually specify Kubernetes version, automatically detected if unset --vmodule moduleSpec | comma-separated list of pattern=N settings for file-filtered logging From 3b8379f0812a637eabe75ba8730e3722b763c393 Mon Sep 17 00:00:00 2001 From: Anupam Tamrakar Date: Tue, 11 Oct 2022 11:48:49 +0530 Subject: [PATCH 107/262] Fixing OCP checks for rh-1.0 (#1259) --- cfg/rh-1.0/controlplane.yaml | 8 ++++---- cfg/rh-1.0/etcd.yaml | 4 ++-- cfg/rh-1.0/master.yaml | 12 ++++++------ cfg/rh-1.0/node.yaml | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cfg/rh-1.0/controlplane.yaml b/cfg/rh-1.0/controlplane.yaml index 392226390..606194ddf 100644 --- a/cfg/rh-1.0/controlplane.yaml +++ b/cfg/rh-1.0/controlplane.yaml @@ -40,9 +40,9 @@ groups: #To view openshift apiserver log files oc adm node-logs --role=master --path=openshift-apiserver/ #To verify kube apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' #To verify openshift apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' type: manual remediation: | No remediation required. @@ -52,9 +52,9 @@ groups: text: "Ensure that the audit policy covers key security concerns (Manual)" audit: | #To verify openshift apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' #To verify kube apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' type: manual remediation: | In OpenShift 4.6 and higher, if appropriate for your needs, diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index 13cd3fdae..d1844a283 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -57,7 +57,7 @@ groups: # Returns 0 if found, 1 if not found for i in $(oc get pods -oname -n openshift-etcd) do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>&1>/dev/null ; echo exit_code=$? + oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? 
done 2>/dev/null use_multiple_values: true tests: @@ -118,7 +118,7 @@ groups: # Returns 0 if found, 1 if not found for i in $(oc get pods -oname -n openshift-etcd) do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>&1>/dev/null ; echo exit_code=$? + oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? done 2>/dev/null use_multiple_values: true tests: diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml index dfeb7ecb0..8866a42e2 100644 --- a/cfg/rh-1.0/master.yaml +++ b/cfg/rh-1.0/master.yaml @@ -129,7 +129,7 @@ groups: scored: false - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" audit: | for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) do @@ -141,7 +141,7 @@ groups: - flag: "root:root" remediation: | No remediation required; file permissions are managed by the operator. - scored: true + scored: false - id: 1.1.9 text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" @@ -316,7 +316,7 @@ groups: scored: false - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" audit: | # Should return root:root for all files and directories for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') @@ -334,7 +334,7 @@ groups: - flag: "root:root" remediation: | No remediation required; file permissions are managed by the operator. - scored: true + scored: false - id: 1.1.20 text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" @@ -381,9 +381,9 @@ groups: text: "Ensure that anonymous requests are authorized (Manual)" audit: | # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[].userGroups' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' # To verify RBAC is enabled oc get clusterrolebinding oc get clusterrole diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index 7b5fab12f..b22dcea85 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -199,7 +199,7 @@ groups: scored: true - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" type: manual # Takes a lot of time for connection to fail and audit: | @@ -215,7 +215,7 @@ groups: - flag: "Connection timed out" remediation: | None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. 
- scored: true + scored: false - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" @@ -346,7 +346,7 @@ groups: scored: false - id: 4.2.9 - text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Automated)" + text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" audit: | for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do @@ -359,7 +359,7 @@ groups: Follow the documentation to edit kubelet parameters https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters KubeAPIQPS: - scored: true + scored: false - id: 4.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" From 9660e7842d286b0197699ea2446e85a6972b307a Mon Sep 17 00:00:00 2001 From: olcuhu <10589589+olcuhu@users.noreply.github.com> Date: Thu, 13 Oct 2022 15:17:13 +0100 Subject: [PATCH 108/262] fixed issue #1295 by making chavacava's suggestion (#1304) * fixed issue 1295 by making chavacava's suggestion & ran tests * removed outer if statement as wasn't checking anything --- cmd/kubernetes_version_test.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/cmd/kubernetes_version_test.go b/cmd/kubernetes_version_test.go index 6ae045316..9b8bc8d0c 100644 --- a/cmd/kubernetes_version_test.go +++ b/cmd/kubernetes_version_test.go @@ -264,14 +264,8 @@ func TestGetKubernetesURL(t *testing.T) { } k8sURL := getKubernetesURL() - if !c.useDefault { - if k8sURL != c.expected { - t.Errorf("Expected %q but Got %q", k8sURL, c.expected) - } - } else { - if k8sURL != c.expected { - t.Errorf("Expected %q but Got %q", k8sURL, c.expected) - } + if k8sURL != c.expected { + t.Errorf("Expected %q but Got %q", k8sURL, c.expected) } }) } From 55688aa62d36c4e6b86750dc5d7ad4ec538f2dce Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 16 Oct 2022 10:01:44 +0300 Subject: [PATCH 109/262] release: v0.6.10 (#1306) Signed-off-by: chenk Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index d7d65543b..c34c9bacb 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.9 + image: docker.io/aquasec/kube-bench:v0.6.10 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From bb3c8e9685003006a6a604504b6cf2810ab03b8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Oct 2022 20:24:30 +0300 Subject: [PATCH 110/262] build(deps): bump golang from 1.19.0 to 1.19.2 (#1299) Bumps golang from 1.19.0 to 1.19.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 676f0fa4c..244e9b45f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.0 AS build +FROM golang:1.19.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 6e291b5fb..25a0435a7 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.19.0 AS build +FROM golang:1.19.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 44eef9289ef097bbdd991c22a7aa8816f0de83fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 25 Oct 2022 09:00:50 +0300 Subject: [PATCH 111/262] build(deps): bump github.com/spf13/cobra from 1.5.0 to 1.6.0 (#1308) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.5.0 to 1.6.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.5.0...v1.6.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 24 +++++++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 2f49b2425..bfc2882d5 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.5.0 + github.com/spf13/cobra v1.6.0 github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 3e85c49b6..385ed0139 100644 --- a/go.sum +++ b/go.sum @@ -38,6 +38,7 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= @@ -73,6 +74,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod 
h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -89,6 +91,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= @@ -100,6 +103,7 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= @@ -139,6 +143,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -165,10 +170,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 
h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -185,10 +188,10 @@ github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= @@ -230,12 +233,15 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -253,12 +259,14 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -272,12 +280,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -285,8 +295,8 @@ github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= -github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= +github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -413,6 +423,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -557,6 +568,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -646,12 +658,14 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= From 3ccafa7be1a0a3d994bacfb4f0ac6e9a954a4eb3 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Thu, 24 Nov 2022 21:23:10 +0800 Subject: [PATCH 112/262] support CIS Kubernetes V1.24 Benchmark v1.0.0 (#1329) --- cfg/cis-1.24/config.yaml | 2 + cfg/cis-1.24/controlplane.yaml | 46 ++ cfg/cis-1.24/etcd.yaml | 135 +++++ cfg/cis-1.24/master.yaml | 937 +++++++++++++++++++++++++++++++++ cfg/cis-1.24/node.yaml | 462 ++++++++++++++++ cfg/cis-1.24/policies.yaml | 269 ++++++++++ cfg/config.yaml | 7 + cmd/common_test.go | 1 + docs/architecture.md | 1 + docs/platforms.md | 1 + 10 files changed, 1861 insertions(+) create mode 100644 cfg/cis-1.24/config.yaml create mode 100644 cfg/cis-1.24/controlplane.yaml create mode 100644 cfg/cis-1.24/etcd.yaml create mode 100644 cfg/cis-1.24/master.yaml create mode 100644 cfg/cis-1.24/node.yaml create mode 100644 cfg/cis-1.24/policies.yaml diff --git a/cfg/cis-1.24/config.yaml b/cfg/cis-1.24/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.24/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.24/controlplane.yaml b/cfg/cis-1.24/controlplane.yaml new file mode 100644 index 000000000..73e220625 --- /dev/null +++ b/cfg/cis-1.24/controlplane.yaml @@ -0,0 
+1,46 @@ +--- +controls: +version: "cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.24/etcd.yaml b/cfg/cis-1.24/etcd.yaml new file mode 100644 index 000000000..918069eed --- /dev/null +++ b/cfg/cis-1.24/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. 
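As an illustrative sketch of the audit policy file that check 3.2.1 above expects, the snippet below shows one possible minimal policy. The file path, the flag it is passed to, and the exact rules are assumptions for illustration only, not something this benchmark or kube-bench prescribes; it simply logs Secret, ConfigMap and TokenReview access at the Metadata level, in line with the guidance in 3.2.2.

    # Hypothetical /etc/kubernetes/audit-policy.yaml, referenced by --audit-policy-file
    apiVersion: audit.k8s.io/v1
    kind: Policy
    rules:
      # Keep Secret/ConfigMap/TokenReview bodies out of the audit log
      - level: Metadata
        resources:
          - group: ""
            resources: ["secrets", "configmaps"]
          - group: "authentication.k8s.io"
            resources: ["tokenreviews"]
      # Baseline: record request metadata for everything else
      - level: Metadata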
+ --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.24/master.yaml b/cfg/cis-1.24/master.yaml new file mode 100644 index 000000000..b28cb0ef5 --- /dev/null +++ b/cfg/cis-1.24/master.yaml @@ -0,0 +1,937 @@ +--- +controls: +version: "cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
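As a sketch of the admission control configuration file that check 1.2.9 above points to via --admission-control-config-file, the example below embeds an EventRateLimit configuration inline. The limit values are illustrative assumptions, not numbers required by the benchmark.

    # Hypothetical file passed with --admission-control-config-file
    apiVersion: apiserver.config.k8s.io/v1
    kind: AdmissionConfiguration
    plugins:
      - name: EventRateLimit
        configuration:
          apiVersion: eventratelimit.admission.k8s.io/v1alpha1
          kind: Configuration
          limits:
            - type: Namespace
              qps: 50        # illustrative rate
              burst: 100     # illustrative burst
              cacheSize: 2000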
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. 
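For checks 1.2.29 and 1.2.30 above, an encryption provider configuration using aescbc could look roughly like the sketch below. The file path is an assumption, and the key material must be your own locally generated 32-byte base64 value rather than the placeholder shown.

    # Hypothetical file passed with --encryption-provider-config
    apiVersion: apiserver.config.k8s.io/v1
    kind: EncryptionConfiguration
    resources:
      - resources:
          - secrets
        providers:
          - aescbc:
              keys:
                - name: key1
                  secret: <base64-encoded 32-byte key>   # placeholder, generate your own
          - identity: {}   # fallback so existing unencrypted data can still be read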
+ scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml new file mode 100644 index 000000000..f14817e34 --- /dev/null +++ b/cfg/cis-1.24/node.yaml @@ -0,0 +1,462 @@ +--- +controls: +version: "cis-1.24" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: false + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/cis-1.24/policies.yaml b/cfg/cis-1.24/policies.yaml
new file mode 100644
index 000000000..59c9bf883
--- /dev/null
+++ b/cfg/cis-1.24/policies.yaml
@@ -0,0 +1,269 @@
+---
+controls:
+version: "cis-1.24"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+ Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
+ scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 93ae5147c..ad241775e 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -249,6 +249,7 @@ version_mapping: "1.21": "cis-1.20" "1.22": "cis-1.23" "1.23": "cis-1.23" + "1.24": "cis-1.24" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "gke-1.0": "gke-1.0" @@ -291,6 +292,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.24": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index f24b80d06..a47cdd1e3 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -239,6 +239,7 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.21", succeed: true, exp: "cis-1.20"}, {kubeVersion: "1.22", succeed: true, exp: "cis-1.23"}, {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index 101c5e776..118cb218c 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -19,6 +19,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | cis-1.6 | master, controlplane, node, etcd, policies | | cis-1.20 | master, controlplane, node, etcd, policies | | cis-1.23 | master, controlplane, node, etcd, policies | +| cis-1.24 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index 98cde6231..2842813c3 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -14,6 +14,7 @@ Some defined by other hardenening guides. 
| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | | CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | | CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | +| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | From 865817dfda9c0360cc71aba36613f2070ed60781 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Fri, 25 Nov 2022 21:32:49 +0800 Subject: [PATCH 113/262] support customize datadir locations of etcd (#1330) --- cfg/cis-1.20/master.yaml | 8 ++++++- cfg/cis-1.23/master.yaml | 8 ++++++- cfg/cis-1.5/master.yaml | 16 +++++++++++-- cfg/cis-1.6/master.yaml | 16 +++++++++++-- cfg/config.yaml | 8 +++++++ cmd/common.go | 2 ++ cmd/util.go | 1 + cmd/util_test.go | 52 ++++++++++++++++++++++++++++++++++++++++ 8 files changed, 105 insertions(+), 6 deletions(-) diff --git a/cfg/cis-1.20/master.yaml b/cfg/cis-1.20/master.yaml index fcbfcd470..10e5028e3 100644 --- a/cfg/cis-1.20/master.yaml +++ b/cfg/cis-1.20/master.yaml @@ -153,7 +153,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" diff --git a/cfg/cis-1.23/master.yaml b/cfg/cis-1.23/master.yaml index c77c9a1f8..8ed17afd4 100644 --- a/cfg/cis-1.23/master.yaml +++ b/cfg/cis-1.23/master.yaml @@ -147,7 +147,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" diff --git a/cfg/cis-1.5/master.yaml b/cfg/cis-1.5/master.yaml index 926cc32ea..df0b491a1 100644 --- a/cfg/cis-1.5/master.yaml +++ b/cfg/cis-1.5/master.yaml @@ -158,7 +158,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! 
test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" @@ -176,7 +182,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G $DATA_DIR tests: test_items: - flag: "etcd:etcd" diff --git a/cfg/cis-1.6/master.yaml b/cfg/cis-1.6/master.yaml index 59d5e0b0d..85ed52344 100644 --- a/cfg/cis-1.6/master.yaml +++ b/cfg/cis-1.6/master.yaml @@ -153,7 +153,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" @@ -170,7 +176,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G $DATA_DIR tests: test_items: - flag: "etcd:etcd" diff --git a/cfg/config.yaml b/cfg/config.yaml index ad241775e..306c87553 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -89,6 +89,9 @@ master: bins: - "etcd" - "openshift start etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml @@ -99,6 +102,7 @@ master: - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd flanneld: optional: true @@ -211,6 +215,9 @@ etcd: etcd: bins: - "etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml @@ -221,6 +228,7 @@ etcd: - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd controlplane: components: diff --git a/cmd/common.go b/cmd/common.go index cf29794a5..dc447e80f 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -96,6 +96,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { svcmap := getFiles(typeConf, "service") kubeconfmap := getFiles(typeConf, "kubeconfig") cafilemap := getFiles(typeConf, "ca") + datadirmap := getFiles(typeConf, "datadir") // Variable substitutions. Replace all occurrences of variables in controls files. 
s := string(in) @@ -104,6 +105,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { s, _ = makeSubstitutions(s, "svc", svcmap) s, _ = makeSubstitutions(s, "kubeconfig", kubeconfmap) s, _ = makeSubstitutions(s, "cafile", cafilemap) + s, _ = makeSubstitutions(s, "datadir", datadirmap) controls, err := check.NewControls(nodetype, []byte(s), detectedVersion) if err != nil { diff --git a/cmd/util.go b/cmd/util.go index 2ab2cca2d..d67d854c1 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -33,6 +33,7 @@ var ( "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, "service": {"svc", "defaultsvc"}, "config": {"confs", "defaultconf"}, + "datadir": {"datadirs", "defaultdatadir"}, } ) diff --git a/cmd/util_test.go b/cmd/util_test.go index 7ced42a21..ecdd04c53 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -395,6 +395,58 @@ func TestGetServiceFiles(t *testing.T) { } } +func TestGetDatadirFiles(t *testing.T) { + var err error + datadir, err := ioutil.TempDir("", "kube-bench-test-etcd-data-dir") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(datadir) + + cases := []struct { + config map[string]interface{} + exp map[string]string + statResults []error + }{ + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{datadir}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{nil}, + exp: map[string]string{"etcd": datadir}, + }, + // fallback to defaultdatadir + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{"/path/to/etcd/data.etcd"}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{os.ErrNotExist}, + exp: map[string]string{"etcd": "/var/lib/etcd/default.etcd"}, + }, + } + + v := viper.New() + statFunc = fakestat + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + for k, val := range c.config { + v.Set(k, val) + } + e = c.statResults + eIndex = 0 + m := getFiles(v, "datadir") + if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + func TestMakeSubsitutions(t *testing.T) { cases := []struct { input string From bd8dd3adcc17fb27cd78b543ca26fcab433ee872 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Mon, 28 Nov 2022 13:58:06 +0800 Subject: [PATCH 114/262] use $etcddatadir in more etcd related checks (#1331) --- cfg/cis-1.20/master.yaml | 8 +++++++- cfg/cis-1.23/master.yaml | 8 +++++++- cfg/cis-1.24/master.yaml | 16 ++++++++++++++-- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/cfg/cis-1.20/master.yaml b/cfg/cis-1.20/master.yaml index 10e5028e3..a57244d6a 100644 --- a/cfg/cis-1.20/master.yaml +++ b/cfg/cis-1.20/master.yaml @@ -176,7 +176,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! 
test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" tests: test_items: - flag: "etcd:etcd" diff --git a/cfg/cis-1.23/master.yaml b/cfg/cis-1.23/master.yaml index 8ed17afd4..db355bb5e 100644 --- a/cfg/cis-1.23/master.yaml +++ b/cfg/cis-1.23/master.yaml @@ -169,7 +169,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" tests: test_items: - flag: "etcd:etcd" diff --git a/cfg/cis-1.24/master.yaml b/cfg/cis-1.24/master.yaml index b28cb0ef5..a13333d5e 100644 --- a/cfg/cis-1.24/master.yaml +++ b/cfg/cis-1.24/master.yaml @@ -147,7 +147,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" @@ -163,7 +169,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" tests: test_items: - flag: "etcd:etcd" From ba23ef534afc585c8109beba543f5e69ec7324db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Nov 2022 16:01:32 +0200 Subject: [PATCH 115/262] build(deps): bump gorm.io/driver/postgres from 1.3.10 to 1.4.5 (#1312) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.3.10 to 1.4.5. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.3.10...v1.4.5) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index bfc2882d5..2ec6056f1 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ require ( github.com/spf13/viper v1.13.0 github.com/stretchr/testify v1.8.0 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.3.10 - gorm.io/gorm v1.23.10 + gorm.io/driver/postgres v1.4.5 + gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 k8s.io/client-go v0.25.2 ) diff --git a/go.sum b/go.sum index 385ed0139..12728d7cf 100644 --- a/go.sum +++ b/go.sum @@ -676,11 +676,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.3.10 h1:Fsd+pQpFMGlGxxVMUPJhNo8gG8B1lKtk8QQ4/VZZAJw= -gorm.io/driver/postgres v1.3.10/go.mod h1:whNfh5WhhHs96honoLjBAMwJGYEuA3m1hvgUbNXhPCw= -gorm.io/gorm v1.23.7/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk= -gorm.io/gorm v1.23.10 h1:4Ne9ZbzID9GUxRkllxN4WjJKpsHx8YbKvekVdgyWh24= -gorm.io/gorm v1.23.10/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= +gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= +gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 h1:7AdrbfcvKnzejfqP5g37fdSZOXH/JvaPIzBIHTOqXKk= +gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From d5039002af06cf7d1d6a9130b1c1b5b68da7058f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Dec 2022 14:35:26 +0200 Subject: [PATCH 116/262] build(deps): bump golang from 1.19.2 to 1.19.3 (#1318) Bumps golang from 1.19.2 to 1.19.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 244e9b45f..d0de37771 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.2 AS build +FROM golang:1.19.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 25a0435a7..9a6d3a37e 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.19.2 AS build +FROM golang:1.19.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From f959abe0da73d9e87ac5ee56bfc9ba2ace8abc04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Dec 2022 14:46:02 +0200 Subject: [PATCH 117/262] build(deps): bump ubi8/ubi-minimal from 8.6 to 8.7 (#1320) Bumps ubi8/ubi-minimal from 8.6 to 8.7. --- updated-dependencies: - dependency-name: ubi8/ubi-minimal dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.ubi8 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 9a6d3a37e..9e4897594 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -11,7 +11,7 @@ RUN make build && cp kube-bench /go/bin/kube-bench # ubi8-minimal base image for build with ubi standards -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.6 as run +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7 as run RUN microdnf install yum findutils openssl\ && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ From 98742f014e975a28601621d60a834e62ec562f21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 1 Dec 2022 15:26:59 +0200 Subject: [PATCH 118/262] build(deps): bump k8s.io/client-go from 0.25.2 to 0.25.4 (#1322) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.25.2 to 0.25.4. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.25.2...v0.25.4) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2ec6056f1..7d8a5fe27 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.5 gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 - k8s.io/client-go v0.25.2 + k8s.io/client-go v0.25.4 ) require ( diff --git a/go.sum b/go.sum index 12728d7cf..a51d109bd 100644 --- a/go.sum +++ b/go.sum @@ -687,8 +687,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/client-go v0.25.2 h1:SUPp9p5CwM0yXGQrwYurw9LWz+YtMwhWd0GqOsSiefo= -k8s.io/client-go v0.25.2/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= +k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= +k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From e096229a5af1ad0856432fb9613d0cca91ab87d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 Dec 2022 19:06:02 +0200 Subject: [PATCH 119/262] build(deps): bump alpine from 3.16.2 to 3.17.0 (#1332) Bumps alpine from 3.16.2 to 3.17.0. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d0de37771..0db792708 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.16.2 AS run +FROM alpine:3.17.0 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From ec51394eb748bb31b64e2a019a7e1b167ee523b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 16:50:23 +0200 Subject: [PATCH 120/262] build(deps): bump github.com/aws/aws-sdk-go-v2/config (#1337) Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.17.8 to 1.18.4. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.17.8...config/v1.18.4) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 24 ++++++++++++------------ go.sum | 44 ++++++++++++++++++++++++-------------------- 2 files changed, 36 insertions(+), 32 deletions(-) diff --git a/go.mod b/go.mod index 7d8a5fe27..b140a6ab9 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/aquasecurity/kube-bench go 1.19 require ( - github.com/aws/aws-sdk-go-v2 v1.16.16 - github.com/aws/aws-sdk-go-v2/config v1.17.8 + github.com/aws/aws-sdk-go-v2 v1.17.2 + github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 github.com/fatih/color v1.13.0 github.com/golang/glog v1.0.0 @@ -21,16 +21,16 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect - github.com/aws/smithy-go v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect + github.com/aws/smithy-go v1.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect diff --git a/go.sum b/go.sum index a51d109bd..4fa8c08d9 100644 --- a/go.sum +++ b/go.sum @@ -40,32 +40,36 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE= -github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA= -github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM= -github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY= +github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8= +github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= +github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E= github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 h1:jA6VOKxMvwEZSbUmidVkubHxEd5/CllfpdUSPQ7wwv4= github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5/go.mod h1:2nUG9O81bApzrD7ixZ//VGGXXwooek/N+EQqeXb9208= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg= -github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM= -github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 
h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= From 4ccffb8fddac108b11f5c98d923e7bbe724e3c6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Dec 2022 17:00:45 +0200 Subject: [PATCH 121/262] build(deps): bump github.com/spf13/viper from 1.13.0 to 1.14.0 (#1339) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.13.0 to 1.14.0. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.13.0...v1.14.0) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 25 ++++++++++++++----------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/go.mod b/go.mod index b140a6ab9..5a91ba256 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.6.0 - github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.0 + github.com/spf13/viper v1.14.0 + github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.5 gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 @@ -32,7 +32,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -55,11 +55,11 @@ require ( github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.4.0 // indirect + github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be // indirect golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/text v0.4.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 4fa8c08d9..78667d685 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -147,8 +147,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -305,21 +305,23 @@ github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmq github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= -github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= +github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -427,7 +429,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -495,8 +497,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI= golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -508,8 +510,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -572,7 +575,7 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= From 8f654a9fc63f9822e1f9c22703b8547c7fc69cdf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 10:50:01 +0200 Subject: [PATCH 122/262] build(deps): bump github.com/spf13/cobra from 1.6.0 to 1.6.1 (#1341) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.6.0 to 1.6.1. 
- [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.6.0...v1.6.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5a91ba256..3bf342699 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/magiconair/properties v1.8.6 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.6.0 + github.com/spf13/cobra v1.6.1 github.com/spf13/viper v1.14.0 github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index 78667d685..50686c5f4 100644 --- a/go.sum +++ b/go.sum @@ -299,8 +299,8 @@ github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= -github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From e08cf54cb034cc1fb7023e5c643d61ca7a69c595 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 10:58:53 +0200 Subject: [PATCH 123/262] build(deps): bump github.com/magiconair/properties from 1.8.6 to 1.8.7 (#1344) Bumps [github.com/magiconair/properties](https://github.com/magiconair/properties) from 1.8.6 to 1.8.7. - [Release notes](https://github.com/magiconair/properties/releases) - [Changelog](https://github.com/magiconair/properties/blob/main/CHANGELOG.md) - [Commits](https://github.com/magiconair/properties/compare/v1.8.6...v1.8.7) --- updated-dependencies: - dependency-name: github.com/magiconair/properties dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3bf342699..f3703e702 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 github.com/fatih/color v1.13.0 github.com/golang/glog v1.0.0 - github.com/magiconair/properties v1.8.6 + github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.6.1 diff --git a/go.sum b/go.sum index 50686c5f4..dddb0e353 100644 --- a/go.sum +++ b/go.sum @@ -247,8 +247,8 @@ github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= From 465c16fe4b65141ad6410a71848fe80f29d97a89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 16:10:46 +0200 Subject: [PATCH 124/262] build(deps): bump golang from 1.19.3 to 1.19.4 (#1345) Bumps golang from 1.19.3 to 1.19.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi8 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0db792708..ef8675494 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.3 AS build +FROM golang:1.19.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi8 index 9e4897594..8c84ca07d 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi8 @@ -1,4 +1,4 @@ -FROM golang:1.19.3 AS build +FROM golang:1.19.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 9991268c856051a8936bde01ecc9b9488daa3f06 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 16:45:05 +0200 Subject: [PATCH 125/262] build(deps): bump goreleaser/goreleaser-action from 3 to 4 (#1347) Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 3 to 4. 
- [Release notes](https://github.com/goreleaser/goreleaser-action/releases) - [Commits](https://github.com/goreleaser/goreleaser-action/compare/v3...v4) --- updated-dependencies: - dependency-name: goreleaser/goreleaser-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d668fd442..34984193c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -95,7 +95,7 @@ jobs: with: fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v3 + uses: goreleaser/goreleaser-action@v4 with: distribution: goreleaser version: v1.7.0 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 97a4dd1bd..0ec4cde92 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,7 +44,7 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v3 + uses: goreleaser/goreleaser-action@v4 with: distribution: goreleaser version: v1.7.0 From e6d0056b8e07282940fd1e663fc0d8659fce886d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Dec 2022 20:57:17 +0200 Subject: [PATCH 126/262] build(deps): bump github.com/aws/aws-sdk-go-v2 from 1.17.2 to 1.17.3 (#1348) Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.17.2 to 1.17.3. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.17.2...v1.17.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index f3703e702..d0ab10c4b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.19 require ( - github.com/aws/aws-sdk-go-v2 v1.17.2 + github.com/aws/aws-sdk-go-v2 v1.17.3 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 github.com/fatih/color v1.13.0 diff --git a/go.sum b/go.sum index dddb0e353..d21f1d020 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,9 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= -github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= +github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= From 07cd55da9cf80203396fe7919424159dacc06b62 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:25:57 +0200 Subject: [PATCH 127/262] build(deps): bump k8s.io/client-go from 0.25.4 to 0.26.0 (#1354) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.25.4 to 0.26.0. - [Release notes](https://github.com/kubernetes/client-go/releases) - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.25.4...v0.26.0) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index d0ab10c4b..e0ba2595a 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.5 gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 - k8s.io/client-go v0.25.4 + k8s.io/client-go v0.26.0 ) require ( @@ -58,8 +58,8 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be // indirect - golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect - golang.org/x/text v0.4.0 // indirect + golang.org/x/sys v0.3.0 // indirect + golang.org/x/text v0.5.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index d21f1d020..3fc7d1941 100644 --- a/go.sum +++ b/go.sum @@ -430,7 +430,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b h1:tvrvnPFcdzp294diPnrdZZZ8XUt2Tyj7svb7X52iDuU= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -500,8 +500,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI= -golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -512,8 +512,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= 
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -695,8 +695,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8= -k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw= +k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= +k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From e1d10533589f5838acdb28319175bcda431429e3 Mon Sep 17 00:00:00 2001 From: Derek Nola Date: Fri, 13 Jan 2023 08:06:57 -0800 Subject: [PATCH 128/262] Fix to empty grep and other cis-1.6-k3s checks (#1352) * Fix to empty grep and other k3s checks Signed-off-by: Derek Nola * Lint fix Signed-off-by: Derek Nola Signed-off-by: Derek Nola --- cfg/cis-1.6-k3s/etcd.yaml | 44 ++++++++++++++----------------------- cfg/cis-1.6-k3s/master.yaml | 15 ++++++++----- 2 files changed, 26 insertions(+), 33 deletions(-) diff --git a/cfg/cis-1.6-k3s/etcd.yaml b/cfg/cis-1.6-k3s/etcd.yaml index 227f4f060..1cabd0a57 100644 --- a/cfg/cis-1.6-k3s/etcd.yaml +++ b/cfg/cis-1.6-k3s/etcd.yaml @@ -10,17 +10,15 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate if use etcd as database (Automated)" - audit: grep -E 'cert-file|key-file' $etcdconf + audit: grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file' tests: bin_op: and test_items: - - flag: "--cert-file" - env: "ETCD_CERT_FILE" - - flag: "--key-file" - env: "ETCD_KEY_FILE" + - flag: "cert-file" + - flag: "key-file" remediation: | By default, K3s uses a config file for etcd that can be found at $etcdconf. - Server and peer cert and key files are specified. No manual remediation needed. + The config file contains client-transport-security: which has fields that have the peer cert and peer key files. No manual remediation needed. 
scored: true - id: 2.2 @@ -28,8 +26,7 @@ groups: audit: grep 'client-cert-auth' $etcdconf tests: test_items: - - flag: "--client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" + - flag: "client-cert-auth" compare: op: eq value: true @@ -40,15 +37,13 @@ groups: - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: grep 'auto-tls' $etcdconf + audit: grep 'auto-tls' $etcdconf | cat tests: bin_op: or test_items: - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" + - flag: "auto-tls" set: false - - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" + - flag: "auto-tls" compare: op: eq value: false @@ -59,14 +54,12 @@ groups: - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: grep -A 5 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file' + audit: grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file' tests: bin_op: and test_items: - - flag: "--peer-cert-file" - env: "ETCD_PEER_CERT_FILE" - - flag: "--peer-key-file" - env: "ETCD_PEER_KEY_FILE" + - flag: "cert-file" + - flag: "key-file" remediation: | By default, K3s starts Etcd with a config file found here, $etcdconf. The config file contains peer-transport-security: which has fields that have the peer cert and peer key files. @@ -74,30 +67,27 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: grep 'client-cert-auth' $etcdconf + audit: grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth' tests: test_items: - - flag: "--peer-client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" + - flag: "client-cert-auth" compare: op: eq value: true remediation: | By default, K3s uses a config file for etcd that can be found at $etcdconf. - Within the file, the client-cert-auth field is set. No manual remediation needed. + The config file contains peer-transport-security: which has client-cert-auth set to true. No manual remediation needed. 
scored: true - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: grep 'peer-auto-tls' $etcdconf + audit: grep 'peer-auto-tls' $etcdconf | cat tests: bin_op: or test_items: - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" + - flag: "peer-auto-tls" set: false - - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" + - flag: "peer-auto-tls" compare: op: eq value: false diff --git a/cfg/cis-1.6-k3s/master.yaml b/cfg/cis-1.6-k3s/master.yaml index f73979be8..e0cb3e6af 100644 --- a/cfg/cis-1.6-k3s/master.yaml +++ b/cfg/cis-1.6-k3s/master.yaml @@ -213,7 +213,7 @@ groups: - id: 1.2.2 text: "Ensure that the --basic-auth-file argument is not set (Automated)" - audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "basic-auth-file" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "basic-auth-file" | cat tests: test_items: - flag: "--basic-auth-file" @@ -224,7 +224,7 @@ groups: - id: 1.2.3 text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file" | cat tests: test_items: - flag: "--token-auth-file" @@ -235,7 +235,7 @@ groups: - id: 1.2.4 text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-https" | cat tests: bin_op: or test_items: @@ -396,7 +396,7 @@ groups: - id: 1.2.15 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "disable-admission-plugins" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "disable-admission-plugins" | cat tests: bin_op: or test_items: @@ -542,9 +542,12 @@ groups: - id: 1.2.26 text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" - audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "request-timeout" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "request-timeout" | cat tests: + bin_op: or test_items: + - flag: "--request-timeout" + set: false - flag: "--request-timeout" compare: op: lte @@ -719,7 +722,7 @@ groups: - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "RotateKubeletServerCertificate" + audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "RotateKubeletServerCertificate" | cat tests: bin_op: or test_items: From b942ed3f0bb63763c3d6a7e6a64d2b8aa6b1843f Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Thu, 2 Feb 2023 14:02:27 +0530 Subject: [PATCH 129/262] bugfix: false negative when audit_config is defined along with audit and config file not found (#1367) Suppress the file not found error only when we have audit or auditEnv is defined and they have valid output captured. As, we already have output from audit command. So we can proceed for our tests even though we didnt find config file. 
file not found error: `failed to run: "/test/config.yaml", output: "/bin/sh: line 1: /test/config.yaml: No such file or directory\n", error: exit status 127` Resolve: #1364 --- check/check.go | 12 ++++++++++-- check/check_test.go | 25 +++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/check/check.go b/check/check.go index 06fac0548..cec107961 100644 --- a/check/check.go +++ b/check/check.go @@ -208,6 +208,14 @@ func (c *Check) runAuditCommands() (lastCommand string, err error) { } c.AuditConfigOutput, err = runAudit(c.AuditConfig) + // when file not found then error comes as exit status 127 + if err != nil && strings.Contains(err.Error(), "exit status 127") && + (c.AuditEnvOutput != "" || c.AuditOutput != "") { + // suppress file not found error when there is Audit OR auditEnv output present + glog.V(3).Info(err) + err = nil + c.AuditConfigOutput = "" + } return c.AuditConfig, err } @@ -227,8 +235,8 @@ func (c *Check) execute() (finalOutput *testOutput, err error) { t.auditUsed = AuditCommand result := *(t.execute(c.AuditOutput)) - // Check for AuditConfigOutput only if AuditConfig is set - if !result.flagFound && c.AuditConfig != "" { + // Check for AuditConfigOutput only if AuditConfig is set and auditConfigOutput is not empty + if !result.flagFound && c.AuditConfig != "" && c.AuditConfigOutput != "" { // t.isConfigSetting = true t.auditUsed = AuditConfig result = *(t.execute(c.AuditConfigOutput)) diff --git a/check/check_test.go b/check/check_test.go index 79eb0463c..124e6f93c 100644 --- a/check/check_test.go +++ b/check/check_test.go @@ -69,6 +69,31 @@ func TestCheck_Run(t *testing.T) { }, Expected: PASS, }, + { + name: "Scored checks that pass should PASS when config file is not present", + check: Check{ + Scored: true, + Audit: "echo hello", + AuditConfig: "/test/config.yaml", + Tests: &tests{TestItems: []*testItem{{ + Flag: "hello", + Set: true, + }}}, + }, + Expected: PASS, + }, + { + name: "Scored checks that pass should FAIL when config file is not present", + check: Check{ + Scored: true, + AuditConfig: "/test/config.yaml", + Tests: &tests{TestItems: []*testItem{{ + Flag: "hello", + Set: true, + }}}, + }, + Expected: FAIL, + }, } for _, testCase := range testCases { From edff7f45a937cde54b2907bd89d21c84db1d5b7b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Feb 2023 22:15:32 +0200 Subject: [PATCH 130/262] build(deps): bump gorm.io/driver/postgres from 1.4.5 to 1.4.6 (#1355) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.4.5 to 1.4.6. - [Release notes](https://github.com/go-gorm/postgres/releases) - [Commits](https://github.com/go-gorm/postgres/compare/v1.4.5...v1.4.6) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 15 ++---- go.sum | 154 +++++++++++++-------------------------------------------- 2 files changed, 38 insertions(+), 131 deletions(-) diff --git a/go.mod b/go.mod index e0ba2595a..b2d3c2d2e 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ require ( github.com/spf13/viper v1.14.0 github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.4.5 - gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 + gorm.io/driver/postgres v1.4.6 + gorm.io/gorm v1.24.2 k8s.io/client-go v0.26.0 ) @@ -35,14 +35,9 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect - github.com/jackc/chunkreader/v2 v2.0.1 // indirect - github.com/jackc/pgconn v1.13.0 // indirect - github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect - github.com/jackc/pgproto3/v2 v2.3.1 // indirect - github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect - github.com/jackc/pgtype v1.12.0 // indirect - github.com/jackc/pgx/v4 v4.17.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgx/v5 v5.2.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -57,7 +52,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be // indirect + golang.org/x/crypto v0.4.0 // indirect golang.org/x/sys v0.3.0 // indirect golang.org/x/text v0.5.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 3fc7d1941..7907a2f80 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,6 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= @@ -79,12 +77,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -104,12 +98,7 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -177,53 +166,14 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= -github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= -github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= -github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= -github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= -github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= -github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= -github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= -github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.13.0 h1:3L1XMNV2Zvca/8BYhzcRFS70Lr0WlDg16Di6SFGAbys= -github.com/jackc/pgconn v1.13.0/go.mod 
h1:AnowpAqO4CMIIJNZl2VJp+KrkAZciAkhEl0W0JIobpI= -github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= -github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= -github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= -github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc= -github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= -github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= -github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= -github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.3.1 h1:nwj7qwf0S+Q7ISFfBndqeLwSwxs+4DPsbRFjECT1Y4Y= -github.com/jackc/pgproto3/v2 v2.3.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= -github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= -github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= -github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.12.0 h1:Dlq8Qvcch7kiehm8wPGIW0W3KsCCHJnRacKW0UM8n5w= -github.com/jackc/pgtype v1.12.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= -github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= -github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= -github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= -github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= -github.com/jackc/pgx/v4 v4.17.2 h1:0Ut0rpeKwvIVbMQ1KbMBU4h6wxehBI535LK6Flheh8E= -github.com/jackc/pgx/v4 v4.17.2/go.mod h1:lcxIZN44yMIrWI78a5CpucdD14hX0SBDbNRvjDBItsw= -github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.3/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= +github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= +github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= @@ -234,29 +184,20 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= -github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= @@ -277,7 +218,6 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= @@ -286,16 +226,8 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= -github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= -github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -309,8 +241,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -329,39 +259,25 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be h1:fmw3UbQh+nxngCAHrDCCztao/kbYFnWjoqop8dHx05A= 
-golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= +golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -395,6 +311,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -408,7 +325,6 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -430,6 +346,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -450,25 +368,21 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -498,12 +412,15 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -512,6 +429,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -524,18 +442,14 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -543,7 +457,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -570,8 +483,7 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -666,11 +578,11 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -684,10 +596,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.4.5 h1:mTeXTTtHAgnS9PgmhN2YeUbazYpLhUI1doLnw42XUZc= -gorm.io/driver/postgres v1.4.5/go.mod h1:GKNQYSJ14qvWkvPwXljMGehpKrhlDNsqYRr5HnYGncg= -gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755 h1:7AdrbfcvKnzejfqP5g37fdSZOXH/JvaPIzBIHTOqXKk= -gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= +gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4= +gorm.io/gorm v1.24.2 h1:9wR6CFD+G8nOusLdvkZelOEhpJVwwHzpQOUM+REd6U0= +gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From c17b4dd2ba113c0b52f2b62ecfe126a9e664baa8 Mon Sep 17 00:00:00 2001 From: chenk Date: Sun, 5 Feb 2023 11:44:23 +0200 Subject: [PATCH 131/262] release: prepare v0.6.11 (#1371) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index c34c9bacb..eb1941157 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.10 + image: docker.io/aquasec/kube-bench:v0.6.11 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From fc72a8a620cce01b2b848ce80ecfbad3cbf75b7c Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Tue, 14 Feb 2023 14:02:02 +0530 Subject: [PATCH 132/262] bugfix: false negative when audit_config file not found (#1376) In case of RKE, env error comes with exit status 1, so added OR codition to match with error text as well. 
resolve: #1364 --- check/check.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/check/check.go b/check/check.go index cec107961..58ce6fb7e 100644 --- a/check/check.go +++ b/check/check.go @@ -209,7 +209,9 @@ func (c *Check) runAuditCommands() (lastCommand string, err error) { c.AuditConfigOutput, err = runAudit(c.AuditConfig) // when file not found then error comes as exit status 127 - if err != nil && strings.Contains(err.Error(), "exit status 127") && + // in some env same error comes as exit status 1 + if err != nil && (strings.Contains(err.Error(), "exit status 127") || + strings.Contains(err.Error(), "No such file or directory")) && (c.AuditEnvOutput != "" || c.AuditOutput != "") { // suppress file not found error when there is Audit OR auditEnv output present glog.V(3).Info(err) From 823f3e1064f35ae184328be3143705e9b39aff38 Mon Sep 17 00:00:00 2001 From: chenk Date: Thu, 23 Feb 2023 09:09:31 +0200 Subject: [PATCH 133/262] release: prepare v0.6.12-rc (#1385) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index eb1941157..7f78165ef 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.11 + image: docker.io/aquasec/kube-bench:v0.6.12-rc command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 7d0d8ca9939bb2ee3a7d06522f70a72da8f64ec8 Mon Sep 17 00:00:00 2001 From: chenk Date: Thu, 23 Feb 2023 13:30:56 +0200 Subject: [PATCH 134/262] release: prepare v0.6.12 (#1387) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 7f78165ef..e5e5ce215 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.12-rc + image: docker.io/aquasec/kube-bench:v0.6.12 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 7aeb6c39774763e74979a0904e374df01844bf21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 11 Mar 2023 09:29:38 +0200 Subject: [PATCH 135/262] build(deps): bump github.com/fatih/color from 1.13.0 to 1.14.1 (#1363) Bumps [github.com/fatih/color](https://github.com/fatih/color) from 1.13.0 to 1.14.1. - [Release notes](https://github.com/fatih/color/releases) - [Commits](https://github.com/fatih/color/compare/v1.13.0...v1.14.1) --- updated-dependencies: - dependency-name: github.com/fatih/color dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index b2d3c2d2e..a1e80d830 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.17.3 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.14.1 github.com/golang/glog v1.0.0 github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 @@ -41,7 +41,7 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect diff --git a/go.sum b/go.sum index 7907a2f80..829dd8e24 100644 --- a/go.sum +++ b/go.sum @@ -88,8 +88,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -195,13 +195,11 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/nxadm/tail 
v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -387,7 +385,6 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -411,7 +408,6 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From 0decc8a53f9d9e70e1cfafe0753668e2da84a1b3 Mon Sep 17 00:00:00 2001 From: Jack Henschel Date: Sat, 18 Mar 2023 18:30:19 +0100 Subject: [PATCH 136/262] docs: Clarify how to run Job on OpenShift (#1401) Signed-off-by: Jack Henschel --- docs/running.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/running.md b/docs/running.md index 11b37d61c..67c5e5b0c 100644 --- a/docs/running.md +++ b/docs/running.md @@ -34,7 +34,7 @@ docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.ya You can run kube-bench inside a pod, but it will need access to the host's PID namespace in order to check the running processes, as well as access to some directories on the host where config files and other files are stored. -The supplied `job.yaml` file can be applied to run the tests as a job. For example: +The `job.yaml` file (available in the root directory of the repository) can be applied to run the tests as a Kubernetes `Job`. For example: ```bash $ kubectl apply -f job.yaml @@ -140,6 +140,14 @@ kube-bench includes a set of test files for Red Hat's OpenShift hardening guide `kube-bench` supports auto-detection, when you run the `kube-bench` command it will autodetect if running in openshift environment. 
+Since running `kube-bench` requires elevated privileges, the `privileged` SecurityContextConstraint needs to be applied to the ServiceAccount used for the `Job`: + +``` +oc create namespace kube-bench +oc adm policy add-scc-to-user privileged --serviceaccount default +oc apply -f job.yaml +``` + ### Running in a GKE cluster | CIS Benchmark | Targets | From 9e41099cec7d5458d8b81f055414bbc0ddb456ed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Mar 2023 12:34:54 +0300 Subject: [PATCH 137/262] build(deps): bump github.com/aws/aws-sdk-go-v2/service/securityhub (#1397) Bumps [github.com/aws/aws-sdk-go-v2/service/securityhub](https://github.com/aws/aws-sdk-go-v2) from 1.23.5 to 1.29.1. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecs/v1.23.5...service/s3/v1.29.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/securityhub dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 8 ++++---- go.sum | 18 ++++++++---------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index a1e80d830..1b650829d 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/aquasecurity/kube-bench go 1.19 require ( - github.com/aws/aws-sdk-go-v2 v1.17.3 + github.com/aws/aws-sdk-go-v2 v1.17.6 github.com/aws/aws-sdk-go-v2/config v1.18.4 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.14.1 github.com/golang/glog v1.0.0 github.com/magiconair/properties v1.8.7 @@ -23,8 +23,8 @@ require ( require ( github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect diff --git a/go.sum b/go.sum index 829dd8e24..069c3883d 100644 --- a/go.sum +++ b/go.sum @@ -38,35 +38,33 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0= 
+github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5 h1:jA6VOKxMvwEZSbUmidVkubHxEd5/CllfpdUSPQ7wwv4= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.23.5/go.mod h1:2nUG9O81bApzrD7ixZ//VGGXXwooek/N+EQqeXb9208= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg= github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= -github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod 
h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= From 96c6b385eff7e06e57d8fa60c1e5f575a8ee0a7c Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Wed, 5 Apr 2023 15:32:36 +0530 Subject: [PATCH 138/262] chore: publish ubi based image (#1412) * chore: publish ubi based image - added publish step to publish ubi image - updated base image for alpine based dockerfile * chore: update pipeline image to ubuntu-latest --- .github/workflows/build.yml | 8 ++++---- .github/workflows/mkdocs-deploy.yaml | 2 +- .github/workflows/publish.yml | 21 +++++++++++++++++++-- .github/workflows/release.yml | 2 +- Dockerfile | 2 +- Dockerfile.ubi8 => Dockerfile.ubi | 4 ++-- makefile | 7 +++++++ 7 files changed, 35 insertions(+), 11 deletions(-) rename Dockerfile.ubi8 => Dockerfile.ubi (93%) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 34984193c..c60bdfa55 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,7 +21,7 @@ env: jobs: lint: name: Lint - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go uses: actions/setup-go@v3 @@ -38,7 +38,7 @@ jobs: args: --verbose unit: name: Unit tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go uses: actions/setup-go@v3 @@ -54,7 +54,7 @@ jobs: file: ./coverage.txt e2e: name: E2e tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go uses: actions/setup-go@v3 @@ -83,7 +83,7 @@ jobs: expected_result: PASSED release: name: Release snapshot - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest needs: [e2e, unit] steps: - name: Setup Go diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index 8e22cc71a..a7bf4e2b5 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -13,7 +13,7 @@ on: jobs: deploy: name: Deploy documentation - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Checkout main uses: actions/checkout@v3 diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index c32ff3a92..bbf94eb62 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -12,7 +12,7 @@ env: jobs: publish: name: Publish - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Check Out Repo uses: actions/checkout@v3 @@ -49,7 +49,7 @@ jobs: - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -64,5 +64,22 @@ jobs: public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest cache-from: type=local,src=/tmp/.buildx-cache/release cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release + + - name: Build and push ubi image - Docker/ECR + id: docker_build_ubi + uses: docker/build-push-action@v4 + with: + context: . 
+ platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x + builder: ${{ steps.buildx.outputs.name }} + push: true + file: Dockerfile.ubi + build-args: | + KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + tags: | + ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi + public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi + cache-from: type=local,src=/tmp/.buildx-cache/release + cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0ec4cde92..519fdbcda 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ env: jobs: release: name: Release - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go uses: actions/setup-go@v3 diff --git a/Dockerfile b/Dockerfile index ef8675494..453382588 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.17.0 AS run +FROM alpine:3.17 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 diff --git a/Dockerfile.ubi8 b/Dockerfile.ubi similarity index 93% rename from Dockerfile.ubi8 rename to Dockerfile.ubi index 8c84ca07d..5f790c361 100644 --- a/Dockerfile.ubi8 +++ b/Dockerfile.ubi @@ -11,9 +11,9 @@ RUN make build && cp kube-bench /go/bin/kube-bench # ubi8-minimal base image for build with ubi standards -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.7 as run +FROM registry.access.redhat.com/ubi8/ubi-minimal as run -RUN microdnf install yum findutils openssl\ +RUN microdnf install -y yum findutils openssl \ && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ && yum update -y \ && yum install -y glibc \ diff --git a/makefile b/makefile index 61b5b54e9..4049c55ec 100644 --- a/makefile +++ b/makefile @@ -4,6 +4,7 @@ DOCKER_ORG ?= aquasec VERSION ?= $(shell git rev-parse --short=7 HEAD) KUBEBENCH_VERSION ?= $(shell git describe --tags --abbrev=0) IMAGE_NAME ?= $(DOCKER_ORG)/$(BINARY):$(VERSION) +IMAGE_NAME_UBI ?= $(DOCKER_ORG)/$(BINARY):$(VERSION)-ubi GOOS ?= linux BUILD_OS := linux uname := $(shell uname -s) @@ -45,6 +46,12 @@ build-docker: --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ -t $(IMAGE_NAME) . +build-docker-ubi: + docker build -f Dockerfile.ubi --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ + --build-arg VCS_REF=$(VERSION) \ + --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ + -t $(IMAGE_NAME_UBI) . + # unit tests tests: GO111MODULE=on go test -vet all -short -race -timeout 30s -coverprofile=coverage.txt -covermode=atomic ./... 
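The `build-docker-ubi` target added in the makefile hunk above mirrors the existing `build-docker` target, so the UBI-based image can be built and inspected locally before the publish workflow pushes the `-ubi` tags. A minimal sketch, assuming Docker and git are available and that the makefile's `BINARY` variable (defined outside this hunk) still resolves to `kube-bench`; `myorg` is a placeholder organization used only for illustration:

```bash
# Build the UBI-based image; DOCKER_ORG defaults to aquasec and
# VERSION defaults to the short git SHA of the current checkout.
make build-docker-ubi DOCKER_ORG=myorg

# The resulting tag follows IMAGE_NAME_UBI, i.e. <org>/kube-bench:<git-sha>-ubi.
docker image ls "myorg/kube-bench"
```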
From c3b6871766984be022da766999c0aedb482f91ae Mon Sep 17 00:00:00 2001 From: Rayan Das Date: Fri, 7 Apr 2023 20:03:52 +0530 Subject: [PATCH 139/262] Fix version in policies.yaml (#1415) --- cfg/cis-1.24/policies.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/cis-1.24/policies.yaml b/cfg/cis-1.24/policies.yaml index 59c9bf883..b59afc67c 100644 --- a/cfg/cis-1.24/policies.yaml +++ b/cfg/cis-1.24/policies.yaml @@ -1,6 +1,6 @@ --- controls: -version: "cis-1.25" +version: "cis-1.24" id: 5 text: "Kubernetes Policies" type: "policies" From 124a8b3a5a80f00cf88dc82f8b4bd13ddcacf9c0 Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 10 Apr 2023 13:59:13 +0300 Subject: [PATCH 140/262] release: prepare v0.6.13-rc (#1416) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index e5e5ce215..f88c449fa 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.12 + image: docker.io/aquasec/kube-bench:v0.6.13-rc command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 0ff5dd0b8e0e7ea5c2d91117e3d25249e09a0896 Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Mon, 17 Apr 2023 18:37:31 +0530 Subject: [PATCH 141/262] chore: Add license file for ubi image (#1425) --- Dockerfile.ubi | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 5f790c361..37654f3aa 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -29,6 +29,7 @@ WORKDIR /opt/kube-bench/ ENV PATH=$PATH:/usr/local/mount-from-host/bin +COPY LICENSE /licenses/LICENSE COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench COPY entrypoint.sh . COPY cfg/ cfg/ From dd6573f3ed5f2760db90a42eab35f126c9f86aa2 Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 17 Apr 2023 16:19:37 +0300 Subject: [PATCH 142/262] release: prepare v0.6.13-rc2 (#1426) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index f88c449fa..1a5b93d6d 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.13-rc + image: docker.io/aquasec/kube-bench:v0.6.13-rc2 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From b43f58dcda3ebe6360c1fe99d0d0a23f1e843d03 Mon Sep 17 00:00:00 2001 From: Murali Paluru Date: Tue, 18 Apr 2023 23:45:05 +0530 Subject: [PATCH 143/262] add darwin builds (#1428) --- .goreleaser.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.goreleaser.yml b/.goreleaser.yml index fed2273a7..78d0bb485 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -8,6 +8,7 @@ builds: binary: kube-bench goos: - linux + - darwin goarch: - amd64 - arm From 8098489433cac2247eb614d3cd155524270bb1df Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 24 Apr 2023 11:02:19 +0300 Subject: [PATCH 144/262] release: prepare v0.6.13 (#1429) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 1a5b93d6d..ffbed8b60 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.13-rc2 + image: docker.io/aquasec/kube-bench:v0.6.13 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From e38c829dbcc3430a565f84dcdc388c89713e4f5d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 13 May 2023 
19:37:45 +0300 Subject: [PATCH 145/262] build(deps): bump gorm.io/gorm from 1.24.2 to 1.25.1 (#1437) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.24.2 to 1.25.1. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.24.2...v1.25.1) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 1b650829d..7733dd191 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.6 - gorm.io/gorm v1.24.2 + gorm.io/gorm v1.25.1 k8s.io/client-go v0.26.0 ) diff --git a/go.sum b/go.sum index 069c3883d..beffcca54 100644 --- a/go.sum +++ b/go.sum @@ -592,8 +592,9 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4= -gorm.io/gorm v1.24.2 h1:9wR6CFD+G8nOusLdvkZelOEhpJVwwHzpQOUM+REd6U0= gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64= +gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From b0e49c87899425db8d3786e81ac4a4ab4e70c7af Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Mon, 15 May 2023 17:31:30 +0530 Subject: [PATCH 146/262] fix: ignore the error from findConfigFile (#1440) When we are trying to access a file from a directory which is not present then we get different error. 
We dont have standard error method to check the msg so added string match for this case --- cmd/util.go | 2 +- cmd/util_test.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/util.go b/cmd/util.go index d67d854c1..59f20b13f 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -241,7 +241,7 @@ func findConfigFile(candidates []string) string { if err == nil { return c } - if !os.IsNotExist(err) { + if !os.IsNotExist(err) && !strings.HasSuffix(err.Error(), "not a directory") { exitWithError(fmt.Errorf("error looking for file %s: %v", c, err)) } } diff --git a/cmd/util_test.go b/cmd/util_test.go index ecdd04c53..8c9327d4d 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -15,6 +15,7 @@ package cmd import ( + "errors" "io/ioutil" "os" "path/filepath" @@ -233,6 +234,7 @@ func TestFindConfigFile(t *testing.T) { {input: []string{"myfile"}, statResults: []error{nil}, exp: "myfile"}, {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, nil}, exp: "thatfile"}, {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, os.ErrNotExist}, exp: ""}, + {input: []string{"thisfile", "/etc/dummy/thatfile"}, statResults: []error{os.ErrNotExist, errors.New("stat /etc/dummy/thatfile: not a directory")}, exp: ""}, } statFunc = fakestat From 29c8f16167c43292bea9bc6bc8d3403465b572c1 Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 15 May 2023 15:34:00 +0300 Subject: [PATCH 147/262] release: prepare v0.6.14-rc (#1442) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index ffbed8b60..9a77ddf87 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.13 + image: docker.io/aquasec/kube-bench:v0.6.14-rc command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 968ee5814e25b4ef89cda3b1c3f2c0e0c984a506 Mon Sep 17 00:00:00 2001 From: wangxiaoer Date: Tue, 16 May 2023 16:41:49 +0800 Subject: [PATCH 148/262] replace with constant (#1445) --- check/test.go | 8 ++++---- cmd/util.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/check/test.go b/check/test.go index bb366230b..2aee9b0b0 100644 --- a/check/test.go +++ b/check/test.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/golang/glog" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" "k8s.io/client-go/util/jsonpath" ) @@ -243,11 +243,11 @@ func (t testItem) evaluate(s string) *testOutput { isExist = "does not exist" } switch t.auditUsed { - case "auditCommand": + case AuditCommand: glog.V(3).Infof("Flag '%s' %s", t.Flag, isExist) - case "auditConfig": + case AuditConfig: glog.V(3).Infof("Path '%s' %s", t.Path, isExist) - case "auditEnv": + case AuditEnv: glog.V(3).Infof("Env '%s' %s", t.Env, isExist) default: glog.V(3).Infof("Error with identify audit used %s", t.auditUsed) diff --git a/cmd/util.go b/cmd/util.go index 59f20b13f..d85d024e7 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -127,7 +127,7 @@ func getConfigFilePath(benchmarkVersion string, filename string) (path string, e glog.V(2).Info(fmt.Sprintf("Looking for config specific CIS version %q", benchmarkVersion)) path = filepath.Join(cfgDir, benchmarkVersion) - file := filepath.Join(path, string(filename)) + file := filepath.Join(path, filename) glog.V(2).Info(fmt.Sprintf("Looking for file: %s", file)) if _, err := os.Stat(file); err != nil { From c2880848f05aaf87c1768c226840356a9a4688fd Mon Sep 17 00:00:00 2001 From: chenk Date: Thu, 18 May 2023 10:32:39 +0300 
Subject: [PATCH 149/262] release: prepare v0.6.14 (#1446) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 9a77ddf87..5cebc62e2 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.14-rc + image: docker.io/aquasec/kube-bench:v0.6.14 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From 6de03bbd7dcf568de7279abc3062f5815f85bdff Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 20 May 2023 18:45:31 +0300 Subject: [PATCH 150/262] build(deps): bump github.com/aws/aws-sdk-go-v2 from 1.17.6 to 1.18.0 (#1433) Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.17.6 to 1.18.0. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.17.6...v1.18.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 7733dd191..7e3718ede 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.19 require ( - github.com/aws/aws-sdk-go-v2 v1.17.6 + github.com/aws/aws-sdk-go-v2 v1.18.0 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.14.1 diff --git a/go.sum b/go.sum index beffcca54..92edb77d7 100644 --- a/go.sum +++ b/go.sum @@ -39,8 +39,9 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= +github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= From e41755ba907aec20c20f403d0b2db993269a2df6 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 21 May 2023 16:39:51 +0800 Subject: [PATCH 151/262] cis-1.24: fix tests of 1.1.1 and 4.2.9 were wrong (#1423) fixes #1410 fixes #1421 --- cfg/cis-1.24/master.yaml | 6 +++--- cfg/cis-1.24/node.yaml | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cfg/cis-1.24/master.yaml b/cfg/cis-1.24/master.yaml index a13333d5e..bd11d8b0b 100644 --- a/cfg/cis-1.24/master.yaml +++ b/cfg/cis-1.24/master.yaml @@ -9,18 +9,18 @@ groups: text: "Control Plane Node Configuration Files" checks: - id: 1.1.1 
- text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" tests: test_items: - flag: "permissions" compare: op: bitmask - value: "644" + value: "600" remediation: | Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 644 $apiserverconf + For example, chmod 600 $apiserverconf scored: true - id: 1.1.2 diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml index f14817e34..8acf6537d 100644 --- a/cfg/cis-1.24/node.yaml +++ b/cfg/cis-1.24/node.yaml @@ -350,8 +350,12 @@ groups: - flag: --event-qps path: '{.eventRecordQPS}' compare: - op: eq + op: gte value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or remediation: | If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. If using command line arguments, edit the kubelet service file From 124c57c6f42b7d5dc84cc433b8fcb7e0d9e7bd4b Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 21 May 2023 20:46:16 +0800 Subject: [PATCH 152/262] support CIS Kubernetes Benchmark v1.7.0 (#1424) --- cfg/cis-1.7/config.yaml | 2 + cfg/cis-1.7/controlplane.yaml | 60 +++ cfg/cis-1.7/etcd.yaml | 135 +++++ cfg/cis-1.7/master.yaml | 947 ++++++++++++++++++++++++++++++++++ cfg/cis-1.7/node.yaml | 461 +++++++++++++++++ cfg/cis-1.7/policies.yaml | 304 +++++++++++ cfg/config.yaml | 7 + cmd/common_test.go | 1 + docs/architecture.md | 1 + docs/platforms.md | 35 +- 10 files changed, 1936 insertions(+), 17 deletions(-) create mode 100644 cfg/cis-1.7/config.yaml create mode 100644 cfg/cis-1.7/controlplane.yaml create mode 100644 cfg/cis-1.7/etcd.yaml create mode 100644 cfg/cis-1.7/master.yaml create mode 100644 cfg/cis-1.7/node.yaml create mode 100644 cfg/cis-1.7/policies.yaml diff --git a/cfg/cis-1.7/config.yaml b/cfg/cis-1.7/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.7/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.7/controlplane.yaml b/cfg/cis-1.7/controlplane.yaml new file mode 100644 index 000000000..eca90eb1d --- /dev/null +++ b/cfg/cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. 
+ scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.7/etcd.yaml b/cfg/cis-1.7/etcd.yaml new file mode 100644 index 000000000..5fbc8ca05 --- /dev/null +++ b/cfg/cis-1.7/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. 
+ --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.7/master.yaml b/cfg/cis-1.7/master.yaml new file mode 100644 index 000000000..135824202 --- /dev/null +++ b/cfg/cis-1.7/master.yaml @@ -0,0 +1,947 @@ +--- +controls: +version: "cis-1.7" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: have + value: "DenyServiceExternalIPs" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. 
+ --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. 
+ Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: false + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. 
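Checks 1.2.29 and 1.2.30 both refer to an EncryptionConfig file without showing one. For illustration only, a minimal sketch using the aescbc provider might look like this; the key name and secret value are placeholders you would generate yourself.

    # Illustrative file referenced by --encryption-provider-config; not part of the benchmark itself.
    apiVersion: apiserver.config.k8s.io/v1
    kind: EncryptionConfiguration
    resources:
      - resources:
          - secrets
        providers:
          - aescbc:
              keys:
                - name: key1
                  secret: <base64-encoded 32-byte key>
          - identity: {}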
+ scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml new file mode 100644 index 000000000..c9eded26e --- /dev/null +++ b/cfg/cis-1.7/node.yaml @@ -0,0 +1,461 @@ +--- +controls: +version: "cis-1.7" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: false + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
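The kubelet checks 4.2.1 through 4.2.5 above all map onto fields of the kubelet config file read via --config. A minimal sketch of a KubeletConfiguration fragment that would satisfy them could look like the following; the clientCAFile path is an assumed kubeadm-style default and the timeout is an example value, neither is mandated by the benchmark.

    # Illustrative kubelet config fragment; paths and timeout value are assumptions.
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false
      webhook:
        enabled: true
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.crt
    authorization:
      mode: Webhook
    readOnlyPort: 0
    streamingConnectionIdleTimeout: 5m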
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. 
For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.12
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{.tlsCipherSuites}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service.
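To illustrate how checks 4.2.10 through 4.2.12 fit together on the config-file side, a sketch of the corresponding KubeletConfiguration fields might look like the fragment below; the cipher list is only a subset of the suites named in the remediation above, chosen as an example rather than a requirement.

    # Illustrative fragment only; any subset of the strong suites listed above would do.
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    rotateCertificates: true
    featureGates:
      RotateKubeletServerCertificate: true
    tlsCipherSuites:
      - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
      - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
      - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384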
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false diff --git a/cfg/cis-1.7/policies.yaml b/cfg/cis-1.7/policies.yaml new file mode 100644 index 000000000..82592a860 --- /dev/null +++ b/cfg/cis-1.7/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. 
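Checks 5.1.2 through 5.1.4 above all push in the same direction: grant only the verbs and resources a workload actually needs. As a purely illustrative sketch (the Role name and namespace are made up), a narrowly scoped Role along these lines avoids both secrets access and wildcards:

    # Hypothetical least-privilege Role; adjust resources and verbs to the workload's real needs.
    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: app-reader
      namespace: demo
    rules:
      - apiGroups: [""]
        resources: ["pods", "configmaps"]
        verbs: ["get", "list", "watch"]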
+ scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
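For check 5.2.1, the built-in policy control mechanism in Kubernetes 1.25 (the release this cis-1.7 profile maps to) is Pod Security Admission. A sketch of a namespace labelled for the restricted profile looks like the following; the namespace name is an example.

    # Illustrative namespace using Pod Security Admission labels; tune per namespace.
    apiVersion: v1
    kind: Namespace
    metadata:
      name: demo
      labels:
        pod-security.kubernetes.io/enforce: restricted
        pod-security.kubernetes.io/warn: restricted
        pod-security.kubernetes.io/audit: restricted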
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a policy which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 306c87553..de58a7794 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -258,6 +258,7 @@ version_mapping: "1.22": "cis-1.23" "1.23": "cis-1.23" "1.24": "cis-1.24" + "1.25": "cis-1.7" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "gke-1.0": "gke-1.0" @@ -306,6 +307,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.7": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index a47cdd1e3..028a41a82 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -240,6 +240,7 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.22", succeed: true, exp: "cis-1.23"}, {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, + {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index 118cb218c..2904fe134 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -20,6 +20,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | cis-1.20 | master, controlplane, node, etcd, policies | | cis-1.23 | master, controlplane, node, etcd, policies | | cis-1.24 | master, controlplane, node, etcd, policies | +| cis-1.7 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index 2842813c3..94f81ccaa 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -8,20 +8,21 @@ Most of our supported benchmarks are defined in one of the following: Some defined by other hardenening guides. 
-| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | -|------|----------------------------------------------------------------|-----------|-------------| -| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | -| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | -| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | -| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | -| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | -| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | -| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | -| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | -| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | -| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | -| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | -| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | -| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | -| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | -| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | +| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | +|------|-------------------------------------------------------------------------------------------------------------|--------------------------|---------------------| +| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | +| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | +| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | +| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | +| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | +| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | +| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | +| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | +| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | +| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | +| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | +| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | +| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | +| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | From 60dde65d72ffd3ad66b713b5f815d47bfa1dcf5e Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 21 May 2023 22:53:58 +0800 Subject: [PATCH 153/262] support CIS Amazon Elastic Kubernetes Service 
(EKS) Benchmark v1.2.0 (#1449) closes #1448 --- cfg/config.yaml | 7 + cfg/eks-1.2.0/config.yaml | 9 + cfg/eks-1.2.0/controlplane.yaml | 14 + cfg/eks-1.2.0/managedservices.yaml | 154 +++++ cfg/eks-1.2.0/master.yaml | 6 + cfg/eks-1.2.0/node.yaml | 330 +++++++++++ cfg/eks-1.2.0/policies.yaml | 213 +++++++ cmd/common_test.go | 6 + cmd/util.go | 2 +- cmd/util_test.go | 2 +- docs/architecture.md | 1 + docs/platforms.md | 1 + go.sum | 923 +++++++++++++++++++++++++++++ job-eks-asff.yaml | 4 +- job-eks.yaml | 2 +- 15 files changed, 1669 insertions(+), 5 deletions(-) create mode 100644 cfg/eks-1.2.0/config.yaml create mode 100644 cfg/eks-1.2.0/controlplane.yaml create mode 100644 cfg/eks-1.2.0/managedservices.yaml create mode 100644 cfg/eks-1.2.0/master.yaml create mode 100644 cfg/eks-1.2.0/node.yaml create mode 100644 cfg/eks-1.2.0/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index de58a7794..861fe45f3 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -261,6 +261,7 @@ version_mapping: "1.25": "cis-1.7" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" + "eks-1.2.0": "eks-1.2.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" "ocp-3.10": "rh-0.7" @@ -338,6 +339,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "eks-1.2.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "rh-0.7": - "master" - "node" diff --git a/cfg/eks-1.2.0/config.yaml b/cfg/eks-1.2.0/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-1.2.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.2.0/controlplane.yaml b/cfg/eks-1.2.0/controlplane.yaml new file mode 100644 index 000000000..d7e3444b2 --- /dev/null +++ b/cfg/eks-1.2.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.2.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Manual)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." + scored: false diff --git a/cfg/eks-1.2.0/managedservices.yaml b/cfg/eks-1.2.0/managedservices.yaml new file mode 100644 index 000000000..c9ae5ff3f --- /dev/null +++ b/cfg/eks-1.2.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.2.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. 
+ Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. + On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. + scored: false + + - id: 5.1.2 + text: "Minimize user access to Amazon ECR (Manual)" + type: "manual" + remediation: | + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features + are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other + AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Amazon ECR (Manual)" + type: "manual" + remediation: | + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess + the following IAM policy permissions for Amazon ECR. + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.3 + text: "AWS Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" + type: "manual" + remediation: | + This process can only be performed during Cluster Creation. + + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described + in the links within the 'References' section. + scored: false + + - id: 5.4 + text: "Cluster Networking" + checks: + - id: 5.4.1 + text: "Restrict Access to the Control Plane Endpoint (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.2 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.3 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.4 + text: "Ensure Network Policy is Enabled and set as appropriate (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.4.5 + text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + + - id: 5.5 + text: "Authentication and Authorization" + checks: + - id: 5.5.1 + text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)" + type: "manual" + remediation: | + Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation. 
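Check 5.5.1 refers to managing Kubernetes RBAC users through the aws-auth ConfigMap consumed by the AWS IAM Authenticator. As a sketch only (the account ID and role name are placeholders), a node-role mapping in that ConfigMap typically looks like this:

    # Illustrative aws-auth mapping; replace the role ARN with one from your own account.
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: aws-auth
      namespace: kube-system
    data:
      mapRoles: |
        - rolearn: arn:aws:iam::111122223333:role/example-eks-node-role
          username: system:node:{{EC2PrivateDNSName}}
          groups:
            - system:bootstrappers
            - system:nodes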
+ scored: false + + + - id: 5.6 + text: "Other Cluster Configurations" + checks: + - id: 5.6.1 + text: "Consider Fargate for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Create a Fargate profile for your cluster Before you can schedule pods running on Fargate + in your cluster, you must define a Fargate profile that specifies which pods should use + Fargate when they are launched. For more information, see AWS Fargate profile. + + Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has + already been created for your cluster with selectors for all pods in the kube-system + and default namespaces. Use the following procedure to create Fargate profiles for + any other namespaces you would like to use with Fargate. + scored: false diff --git a/cfg/eks-1.2.0/master.yaml b/cfg/eks-1.2.0/master.yaml new file mode 100644 index 000000000..8da0179b3 --- /dev/null +++ b/cfg/eks-1.2.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "eks-1.2.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/eks-1.2.0/node.yaml b/cfg/eks-1.2.0/node.yaml new file mode 100644 index 000000000..54a3a9625 --- /dev/null +++ b/cfg/eks-1.2.0/node.yaml @@ -0,0 +1,330 @@ +--- +controls: +version: "eks-1.2.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: false + + - id: 3.1.2 + text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that a Client CA File is Configured (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port is disabled (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. 
+ # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.9 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: gte + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.10 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: true + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.11 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: true + compare: + op: eq + value: true + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using a container-optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cfg/eks-1.2.0/policies.yaml b/cfg/eks-1.2.0/policies.yaml new file mode 100644 index 000000000..7f5db95d9 --- /dev/null +++ b/cfg/eks-1.2.0/policies.yaml @@ -0,0 +1,213 @@ +--- +controls: +version: "eks-1.2.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. 
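+ # Illustrative sketch, not prescribed verbatim by the benchmark: a PodSecurityPolicy
+ # fragment covering 4.2.1 and 4.2.2 would include, for example (the name is a
+ # placeholder, and a complete PSP also needs the remaining mandatory spec fields):
+ #   apiVersion: policy/v1beta1
+ #   kind: PodSecurityPolicy
+ #   metadata:
+ #     name: restricted-example
+ #   spec:
+ #     privileged: false
+ #     hostPID: false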
+ scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 4.3 + text: "CNI Plugin" + checks: + - id: 4.3.1 + text: "Ensure CNI plugin supports network policies (Manual)" + type: "manual" + remediation: | + As with RBAC policies, network policies should adhere to the policy of least privileged + access. Start by creating a deny all policy that restricts all inbound and outbound traffic + from a namespace or create a global policy using Calico. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: [] + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. 
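+ # Illustrative sketch, not part of the CIS remediation text above: namespaces can be
+ # created imperatively and then targeted per workload; the namespace name is a placeholder:
+ #   kubectl create namespace team-a
+ #   kubectl apply -n team-a -f deployment.yaml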
+ scored: false + + - id: 4.6.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/common_test.go b/cmd/common_test.go index 028a41a82..89cd7f84d 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -450,6 +450,12 @@ func TestValidTargets(t *testing.T) { targets: []string{"node", "policies", "controlplane", "managedservices"}, expected: true, }, + { + name: "eks-1.2.0 valid", + benchmark: "eks-1.2.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, } for _, c := range cases { diff --git a/cmd/util.go b/cmd/util.go index d85d024e7..a5765bcd5 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -462,7 +462,7 @@ func getPlatformBenchmarkVersion(platform Platform) string { glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform) switch platform.Name { case "eks": - return "eks-1.1.0" + return "eks-1.2.0" case "gke": switch platform.Version { case "1.15", "1.16", "1.17", "1.18", "1.19": diff --git a/cmd/util_test.go b/cmd/util_test.go index 8c9327d4d..ad8d2a02d 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -636,7 +636,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { args: args{ platform: Platform{Name: "eks"}, }, - want: "eks-1.1.0", + want: "eks-1.2.0", }, { name: "gke 1.19", diff --git a/docs/architecture.md b/docs/architecture.md index 2904fe134..a464afb97 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -25,6 +25,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | | eks-1.1.0 | controlplane, node, policies, managedservices | +| eks-1.2.0 | controlplane, node, policies, managedservices | | ack-1.0 | master, controlplane, node, etcd, policies, managedservices | | aks-1.0 | controlplane, node, policies, managedservices | | rh-0.7 | master,node| diff --git a/docs/platforms.md b/docs/platforms.md index 94f81ccaa..904d787b8 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -20,6 +20,7 @@ Some defined by other hardenening guides. 
| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | | CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | +| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS | | CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | | CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | diff --git a/go.sum b/go.sum index 92edb77d7..64c2cbbc8 100644 --- a/go.sum +++ b/go.sum @@ -17,27 +17,272 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0 h1:QqHZT1IMldf/daXoSnkJWBIqGBsw50X+xP6HSVzLRPo= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0 h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0 h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0 h1:9yKYCozdh29v7QMx3QBuksZGtPNICFb5SVnyNvkKRGg= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/asset v1.5.0/go.mod 
h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0 h1:qzYOcI6u4CD+0R1E8rWbrqs04fISCcg2YYxW8yBAqFM= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0 h1:IYhjgcgwb5TIAhC0aWQGGOqBnP0c2xijgMGf1iJRs50= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0 h1:U+kHmeKGXgBvTlrecPJhwkItWaIpIscG5DUpQxBQZZg= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0 h1:JuTk8po4bCKRwObdT0zLb1K0BGkGHJdtgs2GK3j2Gws= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0 h1:4RESn+mA7eGPBr5eQ4B/hbkHNivzYHbgRWpdlNeNjiE= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0 h1:5F7dowxGuYQlX3LjfjH/sKf+IvI1TsItTw0sDZmoec4= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0 h1:IL5W4fh6dAq9x1mO+4evrWCISOmPJegdaO0hZRZmWNE= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= 
+cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0 h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0 h1:xzXGAE2fAuMh+ksODKr9nRv9ega1vHjFwRqMA8tRrVE= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0 h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0 h1:fnwkyzCVcPI/TmBheGgpmK2h+hWUIDHcZBincHRhrQ0= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0 h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0 h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0 h1:ula4YR2K66o5wifLdPQMtR2I6KP+zvqdSEb6ncd1e0g= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0 h1:NU0Pj57H++JQOW225/7o34sUZ4i9/TLfWFOSbI3N1cY= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0 h1:CipwaecNhtsWUSneV2J5y8OqudHqvqPlcMHgSyh8vak= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0 h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0 
h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/firestore v1.8.0 h1:HokMB9Io0hAyYzlGFeFVMgE3iaPXNvaIsDx5JzblGLI= +cloud.google.com/go/firestore v1.8.0/go.mod h1:r3KB8cAdRIe8znzoPWLw8S6gpDVd9treohhn8b09424= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0 h1:s3Snbr2O4j4p7CuwImBas8rNNmkHS1YJANcCpKGqQSE= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0 h1:PKggmegChZulPW8yvtziF8P9UOuVFwbvylbEucTNups= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0 h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0 h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/grafeas v0.2.0 h1:CYjC+xzdPvbV65gi6Dr4YowKcmLo045pm18L0DhdELM= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0 h1:Fb2iua/5/UBvUuW9PgBinwsCRDi1qoQJEuekOinHFCs= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0 h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0 h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0 h1:qTBOiSnVw7rnW6GVeH5Br8qs80ILoflNgFZySvaT4ek= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0 h1:wzJ9HslsybiJ3HL2168dVonr9D/eBq0VqObiMSCrE6c= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0 h1:mtIQewrz1ewMU3J0vVkUIJtAkpOqgkz4+UmcreeAm08= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= 
+cloud.google.com/go/networksecurity v0.6.0 h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0 h1:YfPI4pOYQDcqJ+thM2cGtR9oRoRv42vRfubSPZnk3DI= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0 h1:fkFlXCxkUt3tE8LYtF6CipuPbC/HIrciwDTjFpsTf88= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0 h1:/7sVaMdtqSm6AjxW8KzoM6UKawkg3REr0XJ1zKtidpc= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0 h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0 h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1 h1:u6EznTGzIdsyOsvm+Xkw0aSuKFXQlyjGE9a4exk6iNQ= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0 h1:BkkI7C0o8CtaHvdDMr5IA+y8pk0Y5wb73C7DHQiAKnw= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0 h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0 h1:C1tw+Qa/bgm6LoH1wuxYdoyinwKkW/jDJ0GpSJf58cE= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0 h1:gtPd4pG/Go5mrdGQ4MJXxPHtjxtoWUBkrWLXNV1L2TA= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/retail v1.8.0/go.mod 
h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0 h1:Q3W/JsQupZWaoFxUOugZd1Eq590R+Dk6dhacLK2h7+w= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0 h1:Fe1Upic/q4cwqXbInCzgAW35QSerj8JlNwATIxDdfOI= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/secretmanager v1.6.0 h1:5v0zegRMlytVnN7J+bg5Ipqah3I2RZ67ysy00mvA+lA= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0 h1:linnRc3/gJYDfKbAtNixVQ52+66DpOx5MmCz0NNxal8= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0 h1:hKIggnv2eCAPjsVnFcZbytMOsFOk6p4ut0iAUDoNsNA= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0 h1:QmCWml/qvNOYyiPP4G52srYcsHSLCXuvydJDVLTFSe8= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0 h1:bRI2QczZGpcPfuhHr63VOdfyyfYp/43N0wRuBKrd0nQ= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0 h1:6c4pvu3k2idEhJRZnZ2HdVLWZUuT9fsns2gQtCzRtqA= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0 h1:w56i2xl1jHX2tz6rHXBPHd6xujevhImzbc16Kl+V/zQ= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/vision v1.2.0 h1:/CsSTkbmO9HC8iQpxbK8ATms3OQaX3YQUeTMGCxlaK4= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 
v2.3.0 h1:eEyIDJ5/98UmQrYZ6eQExUT3iHyDjzzPX29UP6x7ZQo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0 h1:WdHJmLSAs5bIis/WWO7pIfiRBD1PiWe1OAlPrWeM9Tk= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0 h1:0MjX5ugKmTdbRG2Vai5aAgNAOe2wzvs/XQwFDSowy9c= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod 
h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= +github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= @@ -68,42 +313,135 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5b github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test 
v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= 
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -111,6 +449,9 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -125,8 +466,18 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -135,12 +486,23 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -151,18 +513,105 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU= +github.com/hashicorp/consul/api v1.15.3/go.mod h1:/g/qgcoBcEXALCNZgRRisyTW0nY86++L0KbeAMXYCeY= +github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= +github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.4 
h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= +github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo= +github.com/hashicorp/serf v0.9.8/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -172,59 +621,200 @@ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= +github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg= github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= +github.com/onsi/gomega v1.23.0 
h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= 
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/crypt v0.8.0 h1:xtk0uUHVWVsRBdEUGYBym4CXbcllXky2M7Qlwsf8C0Y= +github.com/sagikazarmark/crypt v0.8.0/go.mod h1:TmKwZAo97S4Fy4sfMH/HX/cQP5D+ijra2NyLpNNmttY= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -237,7 +827,10 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -245,34 +838,68 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= +go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= +go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= +go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= +go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= +go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= +go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= +go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -284,8 +911,10 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod 
h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -298,7 +927,10 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -308,10 +940,15 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -319,9 +956,12 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -339,13 +979,34 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= +golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -355,6 +1016,22 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -365,25 +1042,41 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -395,6 +1088,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -403,18 +1098,56 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -422,14 +1155,19 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -443,6 +1181,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -464,9 +1203,11 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -476,14 +1217,27 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -503,12 +1257,44 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api 
v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -533,6 +1319,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -540,12 +1327,80 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto 
v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto 
v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -559,9 +1414,32 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -572,23 +1450,42 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= @@ -602,9 +1499,35 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= +k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 64d91c59d..7c16bba56 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -40,7 +40,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.1.0", + "eks-1.2.0", "--asff", ] env: @@ -59,7 +59,7 @@ spec: mountPath: /etc/kubernetes readOnly: true - name: kube-bench-eks-config - mountPath: "/opt/kube-bench/cfg/eks-1.1.0/config.yaml" + mountPath: "/opt/kube-bench/cfg/eks-1.2.0/config.yaml" subPath: config.yaml readOnly: true restartPolicy: Never diff --git a/job-eks.yaml b/job-eks.yaml index d9601839c..beaf39194 100644 --- a/job-eks.yaml +++ b/job-eks.yaml @@ -20,7 +20,7 @@ spec: "--targets", "node", "--benchmark", - "eks-1.1.0", + "eks-1.2.0", ] volumeMounts: - name: var-lib-kubelet From 84f80b59b887fb53348f88ae34bd48996f6bd70d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 13:41:30 +0300 Subject: [PATCH 154/262] build(deps): bump alpine from 3.17 to 3.18 (#1443) Bumps alpine from 3.17 to 3.18. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 453382588..76bcae53a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.17 AS run +FROM alpine:3.18 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From ca8743c1f7f7fbd63ed9d0826076861c8690c420 Mon Sep 17 00:00:00 2001 From: KiranBodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Thu, 1 Jun 2023 19:07:50 +0530 Subject: [PATCH 155/262] add support VMware Tanzu(TKGI) Benchmarks v1.2.53 (#1452) * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. 
job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 --- cfg/config.yaml | 7 + cfg/tkgi-1.2.53/config.yaml | 2 + cfg/tkgi-1.2.53/controlplane.yaml | 67 ++ cfg/tkgi-1.2.53/etcd.yaml | 121 ++++ cfg/tkgi-1.2.53/master.yaml | 1098 +++++++++++++++++++++++++++++ cfg/tkgi-1.2.53/node.yaml | 418 +++++++++++ cfg/tkgi-1.2.53/policies.yaml | 287 ++++++++ cmd/util.go | 4 +- docs/platforms.md | 1 + docs/running.md | 15 + job-tkgi.yaml | 54 ++ 11 files changed, 2073 insertions(+), 1 deletion(-) create mode 100644 cfg/tkgi-1.2.53/config.yaml create mode 100644 cfg/tkgi-1.2.53/controlplane.yaml create mode 100644 cfg/tkgi-1.2.53/etcd.yaml create mode 100644 cfg/tkgi-1.2.53/master.yaml create mode 100644 cfg/tkgi-1.2.53/node.yaml create mode 100644 cfg/tkgi-1.2.53/policies.yaml create mode 100644 job-tkgi.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 861fe45f3..8e06b88b8 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -270,6 +270,7 @@ version_mapping: "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" "cis-1.6-k3s": "cis-1.6-k3s" + "tkgi-1.2.53": "tkgi-1.2.53" target_mapping: "cis-1.5": @@ -372,3 +373,9 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "tkgi-1.2.53": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" diff --git a/cfg/tkgi-1.2.53/config.yaml b/cfg/tkgi-1.2.53/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/tkgi-1.2.53/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/tkgi-1.2.53/controlplane.yaml b/cfg/tkgi-1.2.53/controlplane.yaml new file mode 100644 index 000000000..4f3ab677e --- /dev/null +++ b/cfg/tkgi-1.2.53/controlplane.yaml @@ -0,0 +1,67 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users" + audit: ps -ef | grep kube-apiserver | grep -- "--oidc-issuer-url=" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + Exception + This setting is site-specific. It can be set in the "Configure created clusters to use UAA as the OIDC provider." + section of the "UAA" + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-policy-file=" + tests: + test_items: + - flag: "--audit-policy-file" + remediation: | + Create an audit policy file for your cluster. 
+ scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns" + audit: | + diff /var/vcap/jobs/kube-apiserver/config/audit_policy.yml \ <(echo "--- apiVersion: audit.k8s.io/v1beta1 kind: + Policy rules: - level: None resources: - group: '' resources: - endpoints - services - services/status users: - + system:kube-proxy verbs: - watch - level: None resources: - group: '' resources: - nodes - nodes/status users: - + kubelet verbs: - get - level: None resources: - group: '' resources: - nodes - nodes/status userGroups: - + system:nodes verbs: - get - level: None namespaces: - kube-system resources: - group: '' resources: - + endpoints users: - system:kube-controller-manager - system:kube-scheduler - system:serviceaccount:kube- + system:endpoint-controller verbs: - get - update - level: None resources: - group: '' resources: - namespaces - + namespaces/status - namespaces/finalize users: - system:apiserver verbs: - get - level: None resources: - + group: metrics.k8s.io users: - system:kube-controller-manager verbs: - get - list - level: None + nonResourceURLs: - \"/healthz*\" - \"/version\" - \"/swagger*\" - level: None resources: - group: '' resources: - + events - level: Request omitStages: - RequestReceived resources: - group: '' resources: - nodes/status - + pods/status userGroups: - system:nodes verbs: - update - patch - level: Request omitStages: - + RequestReceived users: - system:serviceaccount:kube-system:namespace-controller verbs: - deletecollection - + level: Metadata omitStages: - RequestReceived resources: - group: '' resources: - secrets - configmaps - group: + authentication.k8s.io resources: - tokenreviews - level: Request omitStages: - RequestReceived resources: - + group: '' - group: admissionregistration.k8s.io - group: apiextensions.k8s.io - group: apiregistration.k8s.io - + group: apps - group: authentication.k8s.io - group: authorization.k8s.io - group: autoscaling - group: batch - + group: certificates.k8s.io - group: extensions - group: metrics.k8s.io - group: networking.k8s.io - group: policy - + group: rbac.authorization.k8s.io - group: settings.k8s.io - group: storage.k8s.io verbs: - get - list - watch - level: + RequestResponse omitStages: - RequestReceived resources: - group: '' - group: admissionregistration.k8s.io - + group: apiextensions.k8s.io - group: apiregistration.k8s.io - group: apps - group: authentication.k8s.io - group: + authorization.k8s.io - group: autoscaling - group: batch - group: certificates.k8s.io - group: extensions - group: + metrics.k8s.io - group: networking.k8s.io - group: policy - group: rbac.authorization.k8s.io - group: + settings.k8s.io - group: storage.k8s.io - level: Metadata omitStages: - RequestReceived ") + type: "manual" + remediation: | + Consider modification of the audit policy in use on the cluster to include these items, at a + minimum. 
+ scored: false diff --git a/cfg/tkgi-1.2.53/etcd.yaml b/cfg/tkgi-1.2.53/etcd.yaml new file mode 100644 index 000000000..8f99b7f0f --- /dev/null +++ b/cfg/tkgi-1.2.53/etcd.yaml @@ -0,0 +1,121 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--cert-file=/var/vcap/jobs/etcd/config/etcd.crt" | grep -- "--key-file=/var/vcap/jobs/etcd/config/etcd.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--cert-file" + - flag: "--key-file" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--client\-cert\-auth" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--auto-tls" + tests: + test_items: + - flag: "--auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--peer-cert-file=/var/vcap/jobs/etcd/config/peer.crt" | grep -- "--peer-key-file=/var/vcap/jobs/etcd/config/peer.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + - flag: "--peer-key-file" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--peer\-client\-cert\-auth" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--peer-auto-tls" + tests: + test_items: + - flag: "--peer-auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --peer-auto-tls parameter or set it to false. 
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd" + audit: diff /var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem /var/vcap/jobs/etcd/config/etcd-ca.crt | grep -c"^>" | grep -v "^0$" + type: manual + tests: + test_items: + - flag: "--trusted-ca-file" + remediation: | + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/tkgi-1.2.53/master.yaml b/cfg/tkgi-1.2.53/master.yaml new file mode 100644 index 000000000..8d1945700 --- /dev/null +++ b/cfg/tkgi-1.2.53/master.yaml @@ -0,0 +1,1098 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /etc/kubernetes/manifests/kube-controller-manager.yaml + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-scheduler/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. 
+          For example, chmod 644 /var/vcap/jobs/kube-scheduler/config/bpm.yml
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root"
+        audit: stat -c %U:%G /var/vcap/jobs/kube-scheduler/config/bpm.yml
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /var/vcap/jobs/kube-scheduler/config/bpm.yml
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive"
+        audit: stat -c permissions=%a /var/vcap/jobs/etcd/config/bpm.yml
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 /var/vcap/jobs/etcd/config/bpm.yml
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root"
+        audit: stat -c %U:%G /var/vcap/jobs/etcd/config/bpm.yml
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root /var/vcap/jobs/etcd/config/bpm.yml
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive"
+        audit: find ((CNI_DIR))/config/ -type f -not -perm 640 | awk 'END{print NR}' | grep "^0$"
+        type: manual
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root"
+        audit: find ((CNI_DIR))/config/ -type f -not -user root -or -not -group root | awk 'END{print NR}' | grep "^0$"
+        type: manual
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive"
+        audit: stat -c permissions=%a /var/vcap/store/etcd/
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+        remediation: |
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/vcap/store/etcd/
+        scored: true
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd"
+        audit: stat -c %U:%G /var/vcap/store/etcd/
+        type: manual
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+        remediation: |
+          Run the below command (based on the etcd data directory found above).
+          For example, chown etcd:etcd /var/vcap/store/etcd/
+          Exception: All bosh processes run as vcap user
+          The etcd data directory ownership is vcap:vcap
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive"
+        audit: stat -c permissions=%a /etc/kubernetes/admin.conf
+        type: manual
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+ For example, + chmod 644 /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/admin.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.15 + text: "Ensure that the scheduler configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.16 + text: "Ensure that the scheduler configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.17 + text: "Ensure that the controller manager configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.18 + text: "Ensure that the controller manager configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. 
+ For example, + chown root:root /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate- + kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config /var/vcap/jobs/kube-controller-manager/config /var/vcap/jobs/kube- + scheduler/config ((CNI_DIR))/config /var/vcap/jobs/etcd/config | sort -u | xargs ls -ld | awk '{ print $3 " " $4}' | + grep -c -v "root root" | grep "^0$" + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown -R root:root /etc/kubernetes/pki/ + Exception + Files are group owned by vcap + scored: false + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config \( -name '*.crt' -or -name '*.pem' \) -and -not -perm 640 | grep -v + "packages/golang" | grep -v "packages/ncp_rootfs" | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + Exception + Ignoring packages/golang as the package includes test certs used by golang. Ignoring packages/ncp_rootfs on + TKG1 with NSX-T container plugin uses the package is used as the overlay filesystem `mount | grep + "packages/ncp_rootfs"` + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config -name '*.key' -and -not -perm 600 | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + Exception + Permission on etcd .key files is set to 640, to allow read access to vcap group + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -- "--anonymous-auth=false" + type: manual + tests: + test_items: + - flag: "--anonymous-auth=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --anonymous-auth=false + Exception + The flag is set to true to enable API discoveribility. + "Starting in 1.6, the ABAC and RBAC authorizers require explicit authorization of the system:anonymous user or the + system:unauthenticated group, so legacy policy rules that grant access to the * user or * group do not include + anonymous users." 
+ -authorization-mode is set to RBAC + scored: false + + - id: 1.2.2 + text: "Ensure that the --basic-auth-file argument is not set" + audit: ps -ef | grep kube-apiserver | grep -v -- "--basic-auth-file" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file kube-apiserver + on the master node and remove the --basic-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --token-auth-file parameter is not set" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-apiserve[r]" | grep -v tini | grep -v -- "--token-auth-file=" + type: manual + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file /var/vcap/packages/kubernetes/bin/kube-apiserve[r] + on the master node and remove the --token-auth-file= parameter. + Exception + Since k8s processes' lifecyle are managed by BOSH, token based authentication is required when processes + restart. The file has 0640 permission and root:vcap ownership + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true" + audit: ps -ef | grep kube-apiserver | grep -v -- "--kubelet-https=true" + tests: + test_items: + - flag: "--kubelet-https=true" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -- "--kubelet-client-certificate=/var/vcap/jobs/kube-apiserver/config/kubelet- + client-cert.pem" | grep -- "--kubelet-client-key=/var/vcap/jobs/kube-apiserver/config/kubelet-client-key.pem" + type: manual + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + kube-apiserver on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: false + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate" + audit: ps -ef | grep kube-apiserver | grep -- "--kubelet-certificate-authority=" + type: manual + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + kube-apiserver on the master node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + Exception + JIRA ticket #PKS-696 created to investigate a fix. 
PR opened to address the issue https://github.com/cloudfoundry- + incubator/kubo-release/pull/179 + scored: false + + - id: 1.2.7 + text: "Ensure API server authorization modes does not include AlwaysAllow" + audit: | + ps -ef | grep kube-apiserver | grep -- "--authorization-mode" && ps -ef | grep kube-apiserver | grep -v -- "-- + authorization-mode=\(\w\+\|,\)*AlwaysAllow\(\w\+\|,\)*" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*Node\(\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + Exception + This flag can be added using Kubernetes Profiles. Please follow instructions here https://docs.pivotal.io/tkgi/1- + 8/k8s-profiles.html + scored: false + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*RBAC\(\w\+\|,\)* --" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes RBAC, + for example: + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*EventRateLimit\ + (\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file kube-apiserver + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + Exception + "Note: This is an Alpha feature in the Kubernetes v1.13" + Control provides rate limiting and is site-specific + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysAdmit\(\w\+\|,\)*" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. 
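+ # Example (illustrative, not part of the benchmark text): a value such as + # --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,ServiceAccount would satisfy this check, + # while any plugin list that includes AlwaysAdmit fails the nothave comparison above.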
+ scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysPullImages\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + Exception + "Credentials would be required to pull the private images every time. Also, in trusted + environments, this might increases load on network, registry, and decreases speed. + This setting could impact offline or isolated clusters, which have images pre-loaded and do + not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + TKGi is packages with pre-loaded images. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*SecurityContextDeny\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + Exception + This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan" + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*ServiceAccount\ + (\w\+\|,\)* --" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file kube-apiserver + on the master node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\ + (\w\+\|,\)*NamespaceLifecycle\(\w\+\|,\)* --" + tests: + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
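+ # Context (illustrative note): NamespaceLifecycle is one of kube-apiserver's default admission plugins, + # so the intent of this control is simply that it never appears under --disable-admission-plugins.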
+ scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin PodSecurityPolicy is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*PodSecurityPolicy\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Follow the documentation and create Pod Security Policy objects as per your environment. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to a + value that includes PodSecurityPolicy: + --enable-admission-plugins=...,PodSecurityPolicy,... + Then restart the API Server. + Exception + This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan" + scored: false + + - id: 1.2.17 + text: "Ensure that the admission control plugin NodeRestriction is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*NodeRestriction\ + (\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + Exception + PR opened to address the issue https://github.com/cloudfoundry-incubator/kubo-release/pull/179" + scored: true + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--insecure-bind-address" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --insecure-bind-address parameter. + scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--insecure-port=0" + type: manual + tests: + test_items: + - flag: "--insecure-port=0" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --insecure-port=0 + Exception + Related to 1.2.1 + The insecure port is 8080, and is binding only to localhost on the master node, in use by other components on the + master that are bypassing authn/z. + The components connecting to the APIServer are: + kube-controller-manager + kube-proxy + kube-scheduler + Pods are not scheduled on the master node. + scored: false + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--secure-port=0" + tests: + test_items: + - flag: "--secure-port" + compare: + op: noteq + value: 0 + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. 
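+ # Example (illustrative, not part of the benchmark text): the kube-apiserver default of --secure-port=6443 + # satisfies the noteq comparison, whereas --secure-port=0 would disable the HTTPS listener and fail this check.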
+ scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-path=\/var\/vcap\/sys\/log\/kube-apiserver\/audit.log" + type: manual + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: false + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxage=30" + type: manual + tests: + test_items: + - flag: "--audit-log-maxage=30" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: + --audit-log-maxage=30 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.24 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxbackup=10" + type: manual + tests: + test_items: + - flag: "--audit-log-maxbackup=10" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. + --audit-log-maxbackup=10 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.25 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxsize=100" + type: manual + tests: + test_items: + - flag: "--audit-log-maxsize=100" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB: + --audit-log-maxsize=100 + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--request-timeout=" + type: manual + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file kube-apiserver + and set the below parameter as appropriate and if needed. 
+ For example, + --request-timeout=300s + scored: false + + - id: 1.2.27 + text: "Ensure that the --service-account-lookup argument is set to true" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--service-account-lookup" + tests: + test_items: + - flag: "--service-account-lookup=true" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.28 + text: "Ensure that the --service-account-key-file argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--service-account-key-file=/var/vcap/jobs/kube- + apiserver/config/service-account-public-key.pem" + type: manual + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --service-account-key-file parameter + to the public key file for service accounts: + --service-account-key-file= + scored: false + + - id: 1.2.29 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-certfile=/var/vcap/jobs/kube-apiserver/config/etcd- + client.crt" | grep -- "--etcd-keyfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: false + + - id: 1.2.30 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cert-file=/var/vcap/jobs/kube-apiserver/config/kubernetes.pem" | grep -- "--tls-private-key-file=/var/vcap/jobs/kube- + apiserver/config/kubernetes-key.pem" + type: manual + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: false + + - id: 1.2.31 + text: "Ensure that the --client-ca-file argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--client-ca-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem" + type: manual + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the client certificate authority file. 
+ --client-ca-file= + scored: false + + - id: 1.2.32 + text: "Ensure that the --etcd-cafile argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-cafile=/var/vcap/jobs/kube-apiserver/config/etcd-ca.crt" + type: manual + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: false + + - id: 1.2.33 + text: "Ensure that the --encryption-provider-config argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--encryption-provider-config=" + type: manual + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file kube-apiserver + on the master node and set the --encryption-provider-config parameter to the path of that file: --encryption-provider-config= + Exception + Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html + scored: false + + - id: 1.2.34 + text: "Ensure that the encryption provider is set to aescbc" + audit: | + ENC_CONF=`ps -ef | grep kube-apiserver | grep -v tini | sed $'s/ /\\\\\\n/g' | grep -- '--encryption-provider- + config=' | cut -d'=' -f2` grep -- "- \(aescbc\|kms\|secretbox\):" $ENC_CONF + type: manual + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Exception + Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html + scored: false + + - id: 1.2.35 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cipher-suites=" + type: manual + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the master node and set the below parameter. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM + _SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM + _SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM + _SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate" + audit: ps -ef | grep kube-controller-manager | grep -- "--terminated-pod-gc-threshold=100" + type: manual + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example: + --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure controller manager profiling is disabled" + audit: ps -ef | grep kube-controller-manager | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true" + audit: ps -ef | grep kube-controller-manager | grep -- "--use\-service\-account\-credentials=true" + tests: + test_items: + - flag: "--use-service-account-credentials=true" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--service\-account\-private\-key\-file=\/var\/vcap\/jobs\/kube\- + controller\-manager\/config\/service\-account\-private\-key.pem" + type: manual + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: false + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--root\-ca\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/ca.pem" + type: manual + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: false + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true" + audit: | + ps -ef | grep kube-controller-manager | grep -- "--feature-gates=\ + (\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "--feature-gates=RotateKubeletServerCertificate=true" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
+ --feature-gates=RotateKubeletServerCertificate=true + Exception + Certificate rotation is handled by Credhub + scored: false + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1" + audit: | + ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-controller-manage[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1" + type: manual + tests: + test_items: + - flag: "--bind-address=127.0.0.1" + remediation: | + Edit the Controller Manager pod specification file controller manager conf + on the master node and ensure the correct value for the --bind-address parameter + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false" + audit: ps -ef | grep kube-scheduler | grep -v tini | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the Scheduler pod specification file scheduler config file + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-schedule[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1" + type: manual + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Scheduler pod specification file scheduler config + on the master node and ensure the correct value for the --bind-address parameter + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false diff --git a/cfg/tkgi-1.2.53/node.yaml b/cfg/tkgi-1.2.53/node.yaml new file mode 100644 index 000000000..8e0f09591 --- /dev/null +++ b/cfg/tkgi-1.2.53/node.yaml @@ -0,0 +1,418 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kubelet/monit + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /var/vcap/jobs/kubelet/monit + Exception + File is group owned by vcap + scored: true + + - id: 4.1.3 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + scored: true + + - id: 4.1.4 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + File is group owned by vcap + scored: false + + - id: 4.1.5 + text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on worker + scored: false + + - id: 4.1.6 + text: "Ensure that the kubelet.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/kubelet.conf + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /etc/kubernetes/kubelet.conf + Exception + file ownership is vcap:vcap + scored: false + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + type: manual + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + Exception + File is group owned by vcap + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 /var/vcap/jobs/kubelet/config/kubeletconfig.yml + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubeletconfig.yml + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root /var/vcap/jobs/kubelet/config/kubeletconfig.yml + Exception + File is group owned by vcap + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the anonymous-auth argument is set to false" + audit: grep "^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "enabled: false" + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow" + audit: | + grep "^authorization:\n\s{2}mode: AlwaysAllow$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "AlwaysAllow" + set: false + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate" + audit: | + grep ^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse\n(\s{2}webhook:\n\s{4}cacheTTL:\s\d+s\n\s{4}enabled:.*\n)? + \s{2}x509:\n\s{4}clientCAFile:\s"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-client-ca\.pem" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "clientCAFile" + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0" + audit: | + grep "readOnlyPort: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "readOnlyPort: 0" + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0" + audit: | + grep -- "streamingConnectionIdleTimeout: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "streamingConnectionIdleTimeout: 0" + set: false + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true" + audit: | + grep -- "protectKernelDefaults: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "protectKernelDefaults: true" + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true" + audit: | + grep -- "makeIPTablesUtilChains: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "makeIPTablesUtilChains: true" + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set" + audit: | + ps -ef | grep [k]ubelet | grep -- --[c]onfig=/var/vcap/jobs/kubelet/config/kubeletconfig.yml | grep -v -- --hostname-override + type: manual + remediation: | + Edit the kubelet service file + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Exception + On GCE, the hostname needs to be set to the instance name so the gce cloud provider can manage the instance. + In other cases its set to the IP address of the VM. 
+ scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture" + audit: grep -- "--event-qps" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + type: manual + tests: + test_items: + - flag: "--event-qps" + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate" + audit: | + grep ^tlsCertFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet\.pem\"\ntlsPrivateKeyFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-key\.pem\"$ + /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + bin_op: and + test_items: + - flag: "tlsCertFile" + - flag: "tlsPrivateKeyFile" + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location + of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false" + audit: ps -ef | grep kubele[t] | grep -- "--rotate-certificates=false" + type: manual + tests: + test_items: + - flag: "--rotate-certificates=false" + set: false + remediation: | + If using a Kubelet config file, edit the file to add the line rotateCertificates: true or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Exception + Certificate rotation is handled by Credhub + scored: false + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true" + audit: ps -ef | grep kubele[t] | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "RotateKubeletServerCertificate=true" + remediation: | + Edit the kubelet service file + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + Exception + Certificate rotation is handled by Credhub + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers" + audit: ps -ef | grep kubele[t] | grep -- "--tls-cipher- + suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" + type: manual + tests: + test_items: + - flag: --tls-cipher-suites + compare: + op: regex + value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256) + remediation: | + If using a Kubelet config file, edit the file to set TLSCipherSuites: to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/tkgi-1.2.53/policies.yaml b/cfg/tkgi-1.2.53/policies.yaml new file mode 100644 index 000000000..ef5f1ada6 --- /dev/null +++ b/cfg/tkgi-1.2.53/policies.yaml @@ -0,0 +1,287 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + Exception + This is site-specific setting. + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + Exception + This is site-specific setting. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + Exception + This is site-specific setting. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + Exception + This is site-specific setting. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used." 
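+ # One way to spot workloads still using the default ServiceAccount (illustrative; assumes kubectl access to the cluster): + # kubectl get pods --all-namespaces -o jsonpath='{range .items[?(@.spec.serviceAccountName=="default")]}{.metadata.namespace}{"/"}{.metadata.name}{"\n"}{end}'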
+ type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + Exception + This is site-specific setting. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + Exception + This is site-specific setting. + scored: false + + - id: 5.2 + text: "Pod Security Policies" + checks: + - id: 5.2.1 + text: "Minimize the admission of privileged containers" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of root containers" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of containers with the NET_RAW capability" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with added capabilities" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + Exception + This is site-specific setting. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with capabilities assigned" + type: "manual" + remediation: | + Review the use of capabilites in applications running on your cluster. 
Where a namespace + contains applicaions which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + Exception + This is site-specific setting. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports Network Policies" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + Exception + This is site-specific setting. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have Network Policies defined" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + Exception + This is site-specific setting. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using secrets as files over secrets as environment variables" + type: "manual" + remediation: | + if possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + Exception + This is site-specific setting. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + Exception + This is site-specific setting. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + Exception + This is site-specific setting. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + Exception + This is site-specific setting. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your pod definitions" + type: "manual" + remediation: | + Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you + would need to enable alpha features in the apiserver by passing "--feature- + gates=AllAlpha=true" argument. + Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS + parameter to "--feature-gates=AllAlpha=true" + KUBE_API_ARGS="--feature-gates=AllAlpha=true" + Based on your system, restart the kube-apiserver service. For example: + systemctl restart kube-apiserver.service + Use annotations to enable the docker/default seccomp profile in your pod definitions. An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + Exception + This is site-specific setting. + scored: false + + - id: 5.7.3 + text: "Apply Security Context to Your Pods and Containers " + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. 
For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + Exception + This is site-specific setting. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + Exception + This is site-specific setting. + scored: false diff --git a/cmd/util.go b/cmd/util.go index a5765bcd5..5902159e2 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -447,7 +447,7 @@ func getPlatformInfo() Platform { } func getPlatformInfoFromVersion(s string) Platform { - versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+-(\w+)(?:[.\-])\w+`) + versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-])\w+`) subs := versionRe.FindStringSubmatch(s) if len(subs) < 3 { return Platform{} @@ -479,6 +479,8 @@ func getPlatformBenchmarkVersion(platform Platform) string { case "4.1": return "rh-1.0" } + case "vmware": + return "tkgi-1.2.53" } return "" } diff --git a/docs/platforms.md b/docs/platforms.md index 904d787b8..b02223aa0 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -27,3 +27,4 @@ Some defined by other hardenening guides. | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | | CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | +| CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware | diff --git a/docs/running.md b/docs/running.md index 67c5e5b0c..fb7fc68b5 100644 --- a/docs/running.md +++ b/docs/running.md @@ -177,3 +177,18 @@ To run the benchmark as a job in your ACK cluster apply the included `job-ack.ya ``` kubectl apply -f job-ack.yaml ``` + +### Running in a VMware TKGI cluster + +| CIS Benchmark | Targets | +|---------------|--------------------------------------------| +| tkgi-1.2.53 | master, etcd, controlplane, node, policies | + +kube-bench includes benchmarks for VMware tkgi platform. +To run this you will need to specify `--benchmark tkgi-1.2.53` when you run the `kube-bench` command. + +To run the benchmark as a job in your VMware tkgi cluster apply the included `job-tkgi.yaml`. 
+ +``` +kubectl apply -f job-tkgi.yaml +``` \ No newline at end of file diff --git a/job-tkgi.yaml b/job-tkgi.yaml new file mode 100644 index 000000000..3ac760c9f --- /dev/null +++ b/job-tkgi.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies", + "--benchmark", + "tkgi-1.2.53", + ] + volumeMounts: + - name: var-vcap-jobs + mountPath: /var/vcap/jobs + readOnly: true + - name: var-vcap-packages + mountPath: /var/vcap/packages + readOnly: true + - name: var-vcap-store-etcd + mountPath: /var/vcap/store/etcd + readOnly: true + - name: var-vcap-sys + mountPath: /var/vcap/sys + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-vcap-jobs + hostPath: + path: "/var/vcap/jobs" + - name: var-vcap-packages + hostPath: + path: "/var/vcap/packages" + - name: var-vcap-store-etcd + hostPath: + path: "/var/vcap/store/etcd" + - name: var-vcap-sys + hostPath: + path: "/var/vcap/sys" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" From 76c25b2db2f5fc479ad96509779bc069f436aaf5 Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 6 Jun 2023 17:40:44 +0300 Subject: [PATCH 156/262] release: prepare v0.6.15 (#1455) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 5cebc62e2..879fba1e2 100644 --- a/job.yaml +++ b/job.yaml @@ -12,7 +12,7 @@ spec: hostPID: true containers: - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.14 + image: docker.io/aquasec/kube-bench:v0.6.15 command: ["kube-bench"] volumeMounts: - name: var-lib-etcd From a727d73e8ab21d611007a383d3aee68e3fc0a51a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Jun 2023 18:07:26 +0300 Subject: [PATCH 157/262] build(deps): bump golang from 1.19.4 to 1.20.4 (#1436) Bumps golang from 1.19.4 to 1.20.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.ubi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 76bcae53a..a8b6f8445 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.19.4 AS build +FROM golang:1.20.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 37654f3aa..2a766f379 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.19.4 AS build +FROM golang:1.20.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From e2e353a81ac2d4a36dd54e23097cff9242f139e7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Jun 2023 19:42:03 +0300 Subject: [PATCH 158/262] build(deps): bump actions/setup-go from 3 to 4 (#1402) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4. 
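A note on the `getPlatformInfoFromVersion` change in the TKGI patch above: the regex change amounts to the new `[-+]` character class, which lets the detector recognise TKGI-style server versions such as `v1.23.8+vmware.1` (a `+` before the platform token) in addition to the `-eks-...` style strings the old pattern already handled, and the companion `getPlatformBenchmarkVersion` case then maps the `vmware` platform to the `tkgi-1.2.53` benchmark. The sketch below is illustrative only: the regular expression is copied from the diff, but the `main` function, the sample version strings, and the small platform-to-benchmark map are assumptions for demonstration, not kube-bench code.

```go
package main

import (
	"fmt"
	"regexp"
)

// Regex as patched in cmd/util.go: [-+] accepts either separator between the
// patch number and the platform token.
var versionRe = regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-])\w+`)

// Illustrative mapping; only the vmware entry comes from the diff above, the
// eks value and the sample version strings below are assumed for the example.
var benchmarks = map[string]string{
	"eks":    "eks-1.0.1",
	"vmware": "tkgi-1.2.53",
}

func main() {
	for _, v := range []string{"v1.27.4-eks-2d98532", "v1.23.8+vmware.1"} {
		subs := versionRe.FindStringSubmatch(v)
		if len(subs) < 3 {
			fmt.Printf("%s: no platform detected\n", v)
			continue
		}
		fmt.Printf("%s: kubernetes %s on %q -> benchmark %q\n", v, subs[1], subs[2], benchmarks[subs[2]])
	}
}
```

Versions that already matched the old `-`-only pattern keep resolving exactly as before, so the change is purely additive.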
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 8 ++++---- .github/workflows/release.yml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c60bdfa55..6eb0c17a7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -87,7 +87,7 @@ jobs: needs: [e2e, unit] steps: - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 519fdbcda..7f5d6edcc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v3 + uses: actions/setup-go@v4 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code From 40cdc1bfbb1bbc84ef4d54bd1226b73d8f327af3 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Sun, 2 Jul 2023 03:50:07 -0400 Subject: [PATCH 159/262] Fix test_items in cis-1.7 - node - 4.2.12 (#1469) Related issue: https://github.com/aquasecurity/kube-bench/issues/1468 --- cfg/cis-1.7/node.yaml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml index c9eded26e..0846fc492 100644 --- a/cfg/cis-1.7/node.yaml +++ b/cfg/cis-1.7/node.yaml @@ -424,16 +424,12 @@ groups: audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/cat $kubeletconf" tests: - bin_op: or test_items: - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' compare: - op: nothave - value: false - - flag: RotateKubeletServerCertificate - path: '{.featureGates.RotateKubeletServerCertificate}' - set: false + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | If using a Kubelet config file, edit the file to set `TLSCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 From 
aa165518119949b0b3be0ba7adea348214585aad Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Tue, 11 Jul 2023 04:45:06 -0400 Subject: [PATCH 160/262] Fix node.yaml - 4.1.7 and 4.1.8 audit by adding uniq (#1472) --- cfg/ack-1.0/node.yaml | 4 ++-- cfg/cis-1.20/node.yaml | 4 ++-- cfg/cis-1.23/node.yaml | 4 ++-- cfg/cis-1.24/node.yaml | 4 ++-- cfg/cis-1.5/node.yaml | 4 ++-- cfg/cis-1.6/node.yaml | 4 ++-- cfg/cis-1.7/node.yaml | 4 ++-- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/cfg/ack-1.0/node.yaml b/cfg/ack-1.0/node.yaml index bab4b3a12..19a08177e 100644 --- a/cfg/ack-1.0/node.yaml +++ b/cfg/ack-1.0/node.yaml @@ -97,7 +97,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -114,7 +114,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.20/node.yaml b/cfg/cis-1.20/node.yaml index 56081ddad..2bb949964 100644 --- a/cfg/cis-1.20/node.yaml +++ b/cfg/cis-1.20/node.yaml @@ -98,7 +98,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -115,7 +115,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml index bfec111ca..7f93b334a 100644 --- a/cfg/cis-1.23/node.yaml +++ b/cfg/cis-1.23/node.yaml @@ -97,7 +97,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | 
awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -114,7 +114,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml index 8acf6537d..6ee5dbce8 100644 --- a/cfg/cis-1.24/node.yaml +++ b/cfg/cis-1.24/node.yaml @@ -97,7 +97,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -114,7 +114,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml index dafda4587..1349054c9 100644 --- a/cfg/cis-1.5/node.yaml +++ b/cfg/cis-1.5/node.yaml @@ -106,7 +106,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -124,7 +124,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.6/node.yaml b/cfg/cis-1.6/node.yaml index 78080d90f..e3964158d 100644 --- a/cfg/cis-1.6/node.yaml +++ b/cfg/cis-1.6/node.yaml @@ -98,7 +98,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more 
restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -115,7 +115,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml index 0846fc492..d4eabc9f9 100644 --- a/cfg/cis-1.7/node.yaml +++ b/cfg/cis-1.7/node.yaml @@ -97,7 +97,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -114,7 +114,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: From b29ed6b6ed46041ec8df51495be789fc43e6e6f4 Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Mon, 24 Jul 2023 12:32:19 +0530 Subject: [PATCH 161/262] chore: add fips compliant images (#1473) For fips complaince we need to generate fips compliant images. As part of this change, we will create new kube-bench image which will be fips compliant. Image name follows this tag pattern -ubi-fips --- .github/workflows/publish.yml | 19 ++++++++++++++ Dockerfile.fips.ubi | 49 +++++++++++++++++++++++++++++++++++ fipsonly.go | 7 +++++ makefile | 3 +++ 4 files changed, 78 insertions(+) create mode 100644 Dockerfile.fips.ubi create mode 100644 fipsonly.go diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index bbf94eb62..625b5bf16 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -83,3 +83,22 @@ jobs: cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} + + - name: Build and push fips ubi image - Docker/ECR + id: docker_build_fips_ubi + uses: docker/build-push-action@v4 + with: + context: . 
+ platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x + builder: ${{ steps.buildx.outputs.name }} + push: true + file: Dockerfile.fips.ubi + build-args: | + KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + tags: | + ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips + public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips + cache-from: type=local,src=/tmp/.buildx-cache/release + cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi new file mode 100644 index 000000000..eaa82a515 --- /dev/null +++ b/Dockerfile.fips.ubi @@ -0,0 +1,49 @@ +FROM golang:1.20.4 AS build +WORKDIR /go/src/github.com/aquasecurity/kube-bench/ +COPY makefile makefile +COPY go.mod go.sum ./ +COPY main.go . +COPY check/ check/ +COPY cmd/ cmd/ +COPY internal/ internal/ +ARG KUBEBENCH_VERSION +RUN make build-fips && cp kube-bench /go/bin/kube-bench + + +# ubi8-minimal base image for build with ubi standards +FROM registry.access.redhat.com/ubi8/ubi-minimal as run + +RUN microdnf install -y yum findutils openssl \ + && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ + && yum update -y \ + && yum install -y glibc \ + && yum update -y glibc \ + && yum install -y procps \ + && yum update -y procps \ + && yum install jq -y \ + && yum clean all \ + && microdnf remove yum || rpm -e -v yum \ + && microdnf clean all + +WORKDIR /opt/kube-bench/ + +ENV PATH=$PATH:/usr/local/mount-from-host/bin + +COPY LICENSE /licenses/LICENSE +COPY --from=build /go/bin/kube-bench /usr/local/bin/kube-bench +COPY entrypoint.sh . +COPY cfg/ cfg/ +ENTRYPOINT ["./entrypoint.sh"] +CMD ["install"] + + +# Build-time metadata as defined at http://label-schema.org +ARG BUILD_DATE +ARG VCS_REF +LABEL org.label-schema.build-date=$BUILD_DATE \ + org.label-schema.name="kube-bench" \ + org.label-schema.description="Run the CIS Kubernetes Benchmark tests" \ + org.label-schema.url="https://github.com/aquasecurity/kube-bench" \ + org.label-schema.vcs-ref=$VCS_REF \ + org.label-schema.vcs-url="https://github.com/aquasecurity/kube-bench" \ + org.label-schema.schema-version="1.0" diff --git a/fipsonly.go b/fipsonly.go new file mode 100644 index 000000000..807bdf301 --- /dev/null +++ b/fipsonly.go @@ -0,0 +1,7 @@ +//go:build fipsonly + +package main + +import ( + _ "crypto/tls/fipsonly" +) diff --git a/makefile b/makefile index 4049c55ec..f9c91fc01 100644 --- a/makefile +++ b/makefile @@ -39,6 +39,9 @@ build: $(BINARY) $(BINARY): $(SOURCES) GOOS=$(GOOS) CGO_ENABLED=0 go build -ldflags "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion=$(KUBEBENCH_VERSION)" -o $(BINARY) . +build-fips: + GOOS=$(GOOS) CGO_ENABLED=0 GOEXPERIMENT=boringcrypto go build -tags fipsonly -ldflags "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion=$(KUBEBENCH_VERSION)" -o $(BINARY) . 
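The `build-fips` target just added pairs two mechanisms: `GOEXPERIMENT=boringcrypto` makes the Go toolchain back its crypto packages with the BoringCrypto module, and the `fipsonly` build tag compiles in the new `fipsonly.go`, whose blank import of `crypto/tls/fipsonly` restricts TLS to FIPS-approved settings. Below is a minimal sketch of how a binary built this way can confirm that at runtime; it assumes a Go 1.19+ toolchain on a platform where BoringCrypto is supported (linux amd64/arm64) and is an illustration, not part of this patch.

```go
//go:build fipsonly

// Build with, for example:
//   GOEXPERIMENT=boringcrypto go build -tags fipsonly .
package main

import (
	"crypto/boring"
	"fmt"

	// Same blank import as fipsonly.go in the patch: it constrains crypto/tls
	// to FIPS-approved protocol versions and cipher suites. The package only
	// exists under GOEXPERIMENT=boringcrypto, which is why it sits behind the
	// fipsonly build tag.
	_ "crypto/tls/fipsonly"
)

func main() {
	// boring.Enabled reports whether BoringCrypto is actually handling the
	// supported crypto operations in this binary.
	fmt.Println("BoringCrypto enabled:", boring.Enabled())
}
```

Whether it prints `true` is itself a useful smoke test for the published `-ubi-fips` images; without the `fipsonly` tag the file is simply excluded from the build, which is also why the patch's `fipsonly.go` leaves ordinary, non-FIPS builds of kube-bench untouched.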
+ # builds the current dev docker version build-docker: docker build --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ From 9363cdf8ef09bedae0fb3a176af560232d18a4ca Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 24 Jul 2023 11:01:43 +0300 Subject: [PATCH 162/262] release: prepare v0.6.16-rc (#1476) * release: prepare v0.6.16-rc Signed-off-by: chenk * release: prepare v0.6.16-rc Signed-off-by: chenk --------- Signed-off-by: chenk --- job.yaml | 120 +++++++++++++++++++++++++++---------------------------- 1 file changed, 59 insertions(+), 61 deletions(-) diff --git a/job.yaml b/job.yaml index 879fba1e2..a7e7aaf0e 100644 --- a/job.yaml +++ b/job.yaml @@ -9,79 +9,77 @@ spec: labels: app: kube-bench spec: - hostPID: true containers: - - name: kube-bench - image: docker.io/aquasec/kube-bench:v0.6.15 - command: ["kube-bench"] + - command: ["kube-bench"] + image: docker.io/aquasec/kube-bench:vv0.6.16-rc + name: kube-bench volumeMounts: - - name: var-lib-etcd - mountPath: /var/lib/etcd + - mountPath: /var/lib/etcd + name: var-lib-etcd readOnly: true - - name: var-lib-kubelet - mountPath: /var/lib/kubelet + - mountPath: /var/lib/kubelet + name: var-lib-kubelet readOnly: true - - name: var-lib-kube-scheduler - mountPath: /var/lib/kube-scheduler + - mountPath: /var/lib/kube-scheduler + name: var-lib-kube-scheduler readOnly: true - - name: var-lib-kube-controller-manager - mountPath: /var/lib/kube-controller-manager + - mountPath: /var/lib/kube-controller-manager + name: var-lib-kube-controller-manager readOnly: true - - name: etc-systemd - mountPath: /etc/systemd + - mountPath: /etc/systemd + name: etc-systemd readOnly: true - - name: lib-systemd - mountPath: /lib/systemd/ + - mountPath: /lib/systemd/ + name: lib-systemd readOnly: true - - name: srv-kubernetes - mountPath: /srv/kubernetes/ + - mountPath: /srv/kubernetes/ + name: srv-kubernetes readOnly: true - - name: etc-kubernetes - mountPath: /etc/kubernetes + - mountPath: /etc/kubernetes + name: etc-kubernetes readOnly: true - # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. - # You can omit this mount if you specify --version as part of the command. 
- - name: usr-bin - mountPath: /usr/local/mount-from-host/bin + - mountPath: /usr/local/mount-from-host/bin + name: usr-bin readOnly: true - - name: etc-cni-netd - mountPath: /etc/cni/net.d/ + - mountPath: /etc/cni/net.d/ + name: etc-cni-netd readOnly: true - - name: opt-cni-bin - mountPath: /opt/cni/bin/ + - mountPath: /opt/cni/bin/ + name: opt-cni-bin readOnly: true + hostPID: true restartPolicy: Never volumes: - - name: var-lib-etcd - hostPath: - path: "/var/lib/etcd" - - name: var-lib-kubelet - hostPath: - path: "/var/lib/kubelet" - - name: var-lib-kube-scheduler - hostPath: - path: "/var/lib/kube-scheduler" - - name: var-lib-kube-controller-manager - hostPath: - path: "/var/lib/kube-controller-manager" - - name: etc-systemd - hostPath: - path: "/etc/systemd" - - name: lib-systemd - hostPath: - path: "/lib/systemd" - - name: srv-kubernetes - hostPath: - path: "/srv/kubernetes" - - name: etc-kubernetes - hostPath: - path: "/etc/kubernetes" - - name: usr-bin - hostPath: - path: "/usr/bin" - - name: etc-cni-netd - hostPath: - path: "/etc/cni/net.d/" - - name: opt-cni-bin - hostPath: - path: "/opt/cni/bin/" + - hostPath: + path: /var/lib/etcd + name: var-lib-etcd + - hostPath: + path: /var/lib/kubelet + name: var-lib-kubelet + - hostPath: + path: /var/lib/kube-scheduler + name: var-lib-kube-scheduler + - hostPath: + path: /var/lib/kube-controller-manager + name: var-lib-kube-controller-manager + - hostPath: + path: /etc/systemd + name: etc-systemd + - hostPath: + path: /lib/systemd + name: lib-systemd + - hostPath: + path: /srv/kubernetes + name: srv-kubernetes + - hostPath: + path: /etc/kubernetes + name: etc-kubernetes + - hostPath: + path: /usr/bin + name: usr-bin + - hostPath: + path: /etc/cni/net.d/ + name: etc-cni-netd + - hostPath: + path: /opt/cni/bin/ + name: opt-cni-bin From 8c6915c47823637d001eca3e7f3dbd7b60366182 Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 25 Jul 2023 10:33:54 +0300 Subject: [PATCH 163/262] release: prepare v0.6.16 official (#1479) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index a7e7aaf0e..c690c3f7c 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:vv0.6.16-rc + image: docker.io/aquasec/kube-bench:vv0.6.16 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From c8cabc4b1498aadf3a753f4746d4ba495f1222bd Mon Sep 17 00:00:00 2001 From: Guille Vigil Date: Tue, 25 Jul 2023 11:30:14 +0200 Subject: [PATCH 164/262] Update job.yaml (#1477) * Update job.yaml Fix on typo for image version * chore: sync with upstream Signed-off-by: chenk --------- Signed-off-by: chenk Co-authored-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index c690c3f7c..a66da6b10 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:vv0.6.16 + image: docker.io/aquasec/kube-bench:v0.6.16 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 456684462a0a1613973fcfe8efa5b90c4966cff5 Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 25 Jul 2023 12:41:24 +0300 Subject: [PATCH 165/262] release: prepare v0.6.17 (#1480) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index a66da6b10..9db178108 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: 
docker.io/aquasec/kube-bench:v0.6.16 + image: docker.io/aquasec/kube-bench:v0.6.17 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 20ad80577ce99677607e668f32ef6aa723e50021 Mon Sep 17 00:00:00 2001 From: Jonas-Taha El Sesiy Date: Wed, 26 Jul 2023 17:22:19 +0200 Subject: [PATCH 166/262] Bump docker base images (#1465) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During a recent CVE scan we found kube-bench to use `alpine:3.18` as the final image which has a known high CVE. ``` grype aquasec/kube-bench:v0.6.15 ✔ Vulnerability DB [no update available] ✔ Loaded image ✔ Parsed image ✔ Cataloged packages [73 packages] ✔ Scanning image... [4 vulnerabilities] ├── 0 critical, 4 high, 0 medium, 0 low, 0 negligible └── 4 fixed NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY libcrypto3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High libssl3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High openssl 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High ``` The CVE in question was addressed in the latest [alpine release](https://www.alpinelinux.org/posts/Alpine-3.15.9-3.16.6-3.17.4-3.18.2-released.html), hence updating the dockerfiles accordingly --- Dockerfile | 4 ++-- Dockerfile.ubi | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index a8b6f8445..97d57053b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.4 AS build +FROM golang:1.20.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.18 AS run +FROM alpine:3.18.2 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 2a766f379..fb90061eb 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.20.4 AS build +FROM golang:1.20.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From d70459b77c28adf7663abbe87458ab109665cb70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 28 Jul 2023 12:12:45 +0300 Subject: [PATCH 167/262] build(deps): bump golang from 1.20.4 to 1.20.6 (#1475) Bumps golang from 1.20.4 to 1.20.6. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile.fips.ubi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index eaa82a515..1860beb62 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.20.4 AS build +FROM golang:1.20.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 3ef3e9a861b2f7569f23c0760a828d917a8ad0c8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 9 Sep 2023 21:37:29 +0300 Subject: [PATCH 168/262] build(deps): bump alpine from 3.18.2 to 3.18.3 (#1487) Bumps alpine from 3.18.2 to 3.18.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 97d57053b..a58c1b0e8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.18.2 AS run +FROM alpine:3.18.3 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 34ef478b41693ca6e2c85bfdfbc8b61a3f3ace8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Sep 2023 08:11:27 +0300 Subject: [PATCH 169/262] build(deps): bump goreleaser/goreleaser-action from 4 to 5 (#1495) Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 4 to 5. - [Release notes](https://github.com/goreleaser/goreleaser-action/releases) - [Commits](https://github.com/goreleaser/goreleaser-action/compare/v4...v5) --- updated-dependencies: - dependency-name: goreleaser/goreleaser-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6eb0c17a7..65731766f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -95,7 +95,7 @@ jobs: with: fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: distribution: goreleaser version: v1.7.0 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7f5d6edcc..a6633d913 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,7 +44,7 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: distribution: goreleaser version: v1.7.0 From e1c6c80d022c1d9053fd6abb265823f70b1b193e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 16 Sep 2023 12:59:20 +0300 Subject: [PATCH 170/262] build(deps): bump golang from 1.20.6 to 1.21.1 (#1494) Bumps golang from 1.20.6 to 1.21.1. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index a58c1b0e8..d0814af49 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20.6 AS build +FROM golang:1.21.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 1860beb62..40b1a3e89 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.20.6 AS build +FROM golang:1.21.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index fb90061eb..c7782a3ce 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.20.6 AS build +FROM golang:1.21.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 276d30ad7548ecf87307d98d9318bd6379623930 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 30 Sep 2023 19:34:22 +0300 Subject: [PATCH 171/262] build(deps): bump crazy-max/ghaction-docker-meta from 4 to 5 (#1499) Bumps [crazy-max/ghaction-docker-meta](https://github.com/crazy-max/ghaction-docker-meta) from 4 to 5. - [Release notes](https://github.com/crazy-max/ghaction-docker-meta/releases) - [Upgrade guide](https://github.com/docker/metadata-action/blob/master/UPGRADE.md) - [Commits](https://github.com/crazy-max/ghaction-docker-meta/compare/v4...v5) --- updated-dependencies: - dependency-name: crazy-max/ghaction-docker-meta dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 625b5bf16..aa390d6af 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -41,7 +41,7 @@ jobs: password: ${{ secrets.ECR_SECRET_ACCESS_KEY }} - name: Get version id: get_version - uses: crazy-max/ghaction-docker-meta@v4 + uses: crazy-max/ghaction-docker-meta@v5 with: images: ${{ env.REP }} tag-semver: | From 7ad0f2fee6bf65eab51e7b821175e507ce85f7c9 Mon Sep 17 00:00:00 2001 From: AnaisUrlichs Date: Thu, 10 Aug 2023 08:15:29 +0100 Subject: [PATCH 172/262] updates to the readme Signed-off-by: AnaisUrlichs --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 88b7b59fc..52c8f8f56 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,12 @@ Tests are configured with YAML files, making this tool easy to update as test sp ![Kubernetes Bench for Security](/docs/images/output.png "Kubernetes Bench for Security") -### Quick start +## CIS Scanning as part of Trivy and the Trivy Operator + +[Trivy](https://github.com/aquasecurity/trivy), the all in one cloud native security scanner, can be deployed as a [Kubernetes Operator](https://github.com/aquasecurity/trivy-operator) inside a cluster. 
+Both, the [Trivy CLI](https://github.com/aquasecurity/trivy), and the [Trivy Operator](https://github.com/aquasecurity/trivy-operator) support CIS Kubernetes Benchmark scanning among several other features. + +## Quick start There are multiple ways to run kube-bench. You can run kube-bench inside a pod, but it will need access to the host's PID namespace in order to check the running processes, as well as access to some directories on the host where config files and other files are stored. From 8bc4daae10f4407781f0d3be4a544af410b217fb Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 17 Oct 2023 11:34:53 +0300 Subject: [PATCH 173/262] release: prepare v0.6.18-rc (#1508) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 9db178108..0e338b795 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.6.17 + image: docker.io/aquasec/kube-bench:v0.6.18-rc name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 18f8456abdaebd58cafeb90071ca27e3ffee2179 Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 17 Oct 2023 16:28:52 +0300 Subject: [PATCH 174/262] release: prepare v0.6.18 (#1509) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 0e338b795..96d0c4ed0 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.6.18-rc + image: docker.io/aquasec/kube-bench:v0.6.18 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 7f5a2eb78b38fa7698d8d683e95d798183da2ae3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Oct 2023 19:31:35 +0300 Subject: [PATCH 175/262] build(deps): bump docker/build-push-action from 4 to 5 (#1498) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 5. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v4...v5) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index aa390d6af..f1673d6d8 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -49,7 +49,7 @@ jobs: - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -67,7 +67,7 @@ jobs: - name: Build and push ubi image - Docker/ECR id: docker_build_ubi - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -86,7 +86,7 @@ jobs: - name: Build and push fips ubi image - Docker/ECR id: docker_build_fips_ubi - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: context: . 
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x From 55a18aed875c8560a8a6e8380feb984d880f2dd3 Mon Sep 17 00:00:00 2001 From: chenk Date: Mon, 23 Oct 2023 10:03:22 +0300 Subject: [PATCH 176/262] release: prepare-0.6.19 (#1511) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 96d0c4ed0..adf81cefc 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.6.18 + image: docker.io/aquasec/kube-bench:v0.6.19 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 2b466ab2398184833c4fe45f5ce999097bcb4835 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Oct 2023 21:35:49 +0300 Subject: [PATCH 177/262] build(deps): bump docker/setup-qemu-action from 2 to 3 (#1503) Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 2 to 3. - [Release notes](https://github.com/docker/setup-qemu-action/releases) - [Commits](https://github.com/docker/setup-qemu-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/setup-qemu-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index f1673d6d8..5e943a275 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -17,7 +17,7 @@ jobs: - name: Check Out Repo uses: actions/checkout@v3 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v2 From 0918b41eca40a91627382eed86e281317f5903c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 27 Oct 2023 21:45:30 +0300 Subject: [PATCH 178/262] build(deps): bump github.com/golang/glog from 1.0.0 to 1.1.2 (#1489) Bumps [github.com/golang/glog](https://github.com/golang/glog) from 1.0.0 to 1.1.2. - [Release notes](https://github.com/golang/glog/releases) - [Commits](https://github.com/golang/glog/compare/v1.0.0...v1.1.2) --- updated-dependencies: - dependency-name: github.com/golang/glog dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 927 +-------------------------------------------------------- 2 files changed, 3 insertions(+), 926 deletions(-) diff --git a/go.mod b/go.mod index 7e3718ede..7ad9063f6 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.14.1 - github.com/golang/glog v1.0.0 + github.com/golang/glog v1.1.2 github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 64c2cbbc8..b52cdc5eb 100644 --- a/go.sum +++ b/go.sum @@ -17,272 +17,27 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0 h1:gSmWO7DY1vOm0MVU6DNXM11BWHHsTUmsC5cv1fuW5X8= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0 h1:QqHZT1IMldf/daXoSnkJWBIqGBsw50X+xP6HSVzLRPo= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0 h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0 h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0 h1:9yKYCozdh29v7QMx3QBuksZGtPNICFb5SVnyNvkKRGg= -cloud.google.com/go/artifactregistry v1.7.0/go.mod 
h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0 h1:qzYOcI6u4CD+0R1E8rWbrqs04fISCcg2YYxW8yBAqFM= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0 h1:IYhjgcgwb5TIAhC0aWQGGOqBnP0c2xijgMGf1iJRs50= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl v1.6.0 h1:U+kHmeKGXgBvTlrecPJhwkItWaIpIscG5DUpQxBQZZg= -cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0 h1:JuTk8po4bCKRwObdT0zLb1K0BGkGHJdtgs2GK3j2Gws= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0 h1:4RESn+mA7eGPBr5eQ4B/hbkHNivzYHbgRWpdlNeNjiE= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0 h1:5F7dowxGuYQlX3LjfjH/sKf+IvI1TsItTw0sDZmoec4= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0 h1:IL5W4fh6dAq9x1mO+4evrWCISOmPJegdaO0hZRZmWNE= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= 
-cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0 h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0 h1:xzXGAE2fAuMh+ksODKr9nRv9ega1vHjFwRqMA8tRrVE= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0 h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0 h1:fnwkyzCVcPI/TmBheGgpmK2h+hWUIDHcZBincHRhrQ0= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0 h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0 h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0 h1:ula4YR2K66o5wifLdPQMtR2I6KP+zvqdSEb6ncd1e0g= -cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0 h1:NU0Pj57H++JQOW225/7o34sUZ4i9/TLfWFOSbI3N1cY= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0 h1:CipwaecNhtsWUSneV2J5y8OqudHqvqPlcMHgSyh8vak= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0 h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod 
h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0 h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/firestore v1.8.0 h1:HokMB9Io0hAyYzlGFeFVMgE3iaPXNvaIsDx5JzblGLI= -cloud.google.com/go/firestore v1.8.0/go.mod h1:r3KB8cAdRIe8znzoPWLw8S6gpDVd9treohhn8b09424= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0 h1:s3Snbr2O4j4p7CuwImBas8rNNmkHS1YJANcCpKGqQSE= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0 h1:PKggmegChZulPW8yvtziF8P9UOuVFwbvylbEucTNups= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0 h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0 h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/grafeas v0.2.0 h1:CYjC+xzdPvbV65gi6Dr4YowKcmLo045pm18L0DhdELM= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0 h1:Fb2iua/5/UBvUuW9PgBinwsCRDi1qoQJEuekOinHFCs= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0 h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0 h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0 h1:qTBOiSnVw7rnW6GVeH5Br8qs80ILoflNgFZySvaT4ek= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0 h1:wzJ9HslsybiJ3HL2168dVonr9D/eBq0VqObiMSCrE6c= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0 h1:mtIQewrz1ewMU3J0vVkUIJtAkpOqgkz4+UmcreeAm08= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networksecurity 
v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0 h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0 h1:YfPI4pOYQDcqJ+thM2cGtR9oRoRv42vRfubSPZnk3DI= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0 h1:fkFlXCxkUt3tE8LYtF6CipuPbC/HIrciwDTjFpsTf88= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0 h1:/7sVaMdtqSm6AjxW8KzoM6UKawkg3REr0XJ1zKtidpc= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0 h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0 h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/recaptchaenterprise v1.3.1 h1:u6EznTGzIdsyOsvm+Xkw0aSuKFXQlyjGE9a4exk6iNQ= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0 h1:BkkI7C0o8CtaHvdDMr5IA+y8pk0Y5wb73C7DHQiAKnw= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0 h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0 h1:C1tw+Qa/bgm6LoH1wuxYdoyinwKkW/jDJ0GpSJf58cE= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0 h1:gtPd4pG/Go5mrdGQ4MJXxPHtjxtoWUBkrWLXNV1L2TA= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= 
-cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0 h1:Q3W/JsQupZWaoFxUOugZd1Eq590R+Dk6dhacLK2h7+w= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0 h1:Fe1Upic/q4cwqXbInCzgAW35QSerj8JlNwATIxDdfOI= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/secretmanager v1.6.0 h1:5v0zegRMlytVnN7J+bg5Ipqah3I2RZ67ysy00mvA+lA= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0 h1:linnRc3/gJYDfKbAtNixVQ52+66DpOx5MmCz0NNxal8= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0 h1:hKIggnv2eCAPjsVnFcZbytMOsFOk6p4ut0iAUDoNsNA= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0 h1:QmCWml/qvNOYyiPP4G52srYcsHSLCXuvydJDVLTFSe8= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0 h1:bRI2QczZGpcPfuhHr63VOdfyyfYp/43N0wRuBKrd0nQ= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0 h1:wWRIaDURQA8xxHguFCshYepGlrWIrbBnAmc7wfg07qY= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0 h1:6c4pvu3k2idEhJRZnZ2HdVLWZUuT9fsns2gQtCzRtqA= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0 h1:w56i2xl1jHX2tz6rHXBPHd6xujevhImzbc16Kl+V/zQ= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/vision v1.2.0 h1:/CsSTkbmO9HC8iQpxbK8ATms3OQaX3YQUeTMGCxlaK4= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod 
h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0 h1:eEyIDJ5/98UmQrYZ6eQExUT3iHyDjzzPX29UP6x7ZQo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0 h1:WdHJmLSAs5bIis/WWO7pIfiRBD1PiWe1OAlPrWeM9Tk= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0 h1:0MjX5ugKmTdbRG2Vai5aAgNAOe2wzvs/XQwFDSowy9c= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= 
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= @@ -313,135 +68,42 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5b github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy 
v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog 
v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -449,9 +111,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -466,18 +125,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -486,23 +135,12 @@ github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -513,105 +151,18 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0 h1:SXk3ABtQYDT/OH8jAyvEOQ58mgawq5C4o/4/89qN2ZU= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.15.3 h1:WYONYL2rxTXtlekAqblR2SCdJsizMDIj/uXb5wNy9zU= -github.com/hashicorp/consul/api v1.15.3/go.mod h1:/g/qgcoBcEXALCNZgRRisyTW0nY86++L0KbeAMXYCeY= -github.com/hashicorp/consul/sdk v0.11.0 h1:HRzj8YSCln2yGgCumN5CL8lYlD3gBurnervJRJAZyC4= -github.com/hashicorp/consul/sdk v0.11.0/go.mod h1:yPkX5Q6CsxTFMjQQDJwzeNmUUF5NUGGbrDsv9wTb8cw= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= -github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= -github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/memberlist v0.3.1 h1:MXgUXLqva1QvpVEDQW1IQLG0wivQAtmFlHRQ+1vWZfM= -github.com/hashicorp/memberlist v0.3.1/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hashicorp/serf v0.9.8 h1:JGklO/2Drf1QGa312EieQN3zhxQ+aJg6pG+aC3MFaVo= -github.com/hashicorp/serf v0.9.8/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -621,200 +172,59 @@ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/ github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= -github.com/jackc/puddle/v2 v2.1.2 h1:0f7vaaXINONKTsxYDn4otOAiJanX/BMeAtY//BXqzlg= github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= 
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= -github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg= -github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod 
h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= -github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common 
v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/crypt v0.8.0 h1:xtk0uUHVWVsRBdEUGYBym4CXbcllXky2M7Qlwsf8C0Y= -github.com/sagikazarmark/crypt v0.8.0/go.mod h1:TmKwZAo97S4Fy4sfMH/HX/cQP5D+ijra2NyLpNNmttY= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= @@ -827,10 +237,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= 
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -838,68 +245,34 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.etcd.io/etcd/api/v3 v3.5.5 h1:BX4JIbQ7hl7+jL+g+2j5UAr0o1bctCm6/Ct+ArBGkf0= -go.etcd.io/etcd/api/v3 v3.5.5/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5 h1:9S0JUVvmrVl7wCF39iTQthdaaNIiAaQbmK75ogO6GU8= -go.etcd.io/etcd/client/pkg/v3 v3.5.5/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/v2 v2.305.5 h1:DktRP60//JJpnPC0VBymAN/7V71GHMdjDCBt4ZPXDjI= -go.etcd.io/etcd/client/v2 v2.305.5/go.mod h1:zQjKllfqfBVyVStbt4FaosoX2iYd8fV/GRy/PbowgP4= -go.etcd.io/etcd/client/v3 v3.5.5 h1:q++2WTJbUgpQu4B6hCuT7VkdwaTP7Qz6Daak3WzbrlI= -go.etcd.io/etcd/client/v3 v3.5.5/go.mod h1:aApjR4WGlSumpnJ2kloS75h6aHUmAyaPLjHMxpc7E7c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -911,10 +284,8 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp 
v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -927,10 +298,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -940,15 +308,10 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -956,12 +319,9 @@ 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -979,34 +339,13 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net 
v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1016,22 +355,6 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 
v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1042,41 +365,25 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1088,8 +395,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1098,56 +403,18 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1155,19 +422,14 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1181,7 +443,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1203,11 +464,9 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1217,27 +476,14 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1257,44 +503,12 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod 
h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0 h1:JxJl2qQ85fRMPNvlZY/enexbxpCjLwGhZUtgfGeQ51I= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1319,7 +533,6 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -1327,80 +540,12 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto 
v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= -google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto 
v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e h1:S9GbmC1iCgvbLyAokVCwiO6tVIrU9Y7c5oMx1V/ki/Y= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1414,32 +559,9 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc 
v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1450,42 +572,23 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= @@ -1499,35 +602,9 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= 
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From dc0580cebe0b04a89092685f355dfea21769ed91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Nov 2023 18:33:42 +0200 Subject: [PATCH 179/262] build(deps): bump golang from 1.21.1 to 1.21.3 (#1507) Bumps golang from 1.21.1 to 1.21.3. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index d0814af49..5c00713c0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.1 AS build +FROM golang:1.21.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 40b1a3e89..2bb64afae 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.1 AS build +FROM golang:1.21.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index c7782a3ce..940c83ec9 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.1 AS build +FROM golang:1.21.3 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 63055a73323789c5e8fd771d3071c092f2ef4f4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 12 Nov 2023 10:47:01 +0200 Subject: [PATCH 180/262] build(deps): bump github.com/fatih/color from 1.14.1 to 1.16.0 (#1520) Bumps [github.com/fatih/color](https://github.com/fatih/color) from 1.14.1 to 1.16.0. - [Release notes](https://github.com/fatih/color/releases) - [Commits](https://github.com/fatih/color/compare/v1.14.1...v1.16.0) --- updated-dependencies: - dependency-name: github.com/fatih/color dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 +++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 7ad9063f6..ae41c4aea 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-sdk-go-v2 v1.18.0 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 - github.com/fatih/color v1.14.1 + github.com/fatih/color v1.16.0 github.com/golang/glog v1.1.2 github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 @@ -41,7 +41,7 @@ require ( github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect @@ -53,7 +53,7 @@ require ( github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect golang.org/x/crypto v0.4.0 // indirect - golang.org/x/sys v0.3.0 // indirect + golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.5.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index b52cdc5eb..178fbe9af 100644 --- a/go.sum +++ b/go.sum @@ -87,8 +87,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= -github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -197,8 +197,8 @@ github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3v github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -411,8 +411,10 @@ golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= From fac90f756e5cd976aabcd4ebdb42aad7b58cf6d1 Mon Sep 17 00:00:00 2001 From: Benjamin Schimke Date: Mon, 20 Nov 2023 12:59:32 +0200 Subject: [PATCH 181/262] feat(cis-1.24-microk8s): Add support to CIS-1.24 for microk8s distro (#1510) --- cfg/cis-1.24-microk8s/config.yaml | 2 + cfg/cis-1.24-microk8s/controlplane.yaml | 46 ++ cfg/cis-1.24-microk8s/etcd.yaml | 121 +++ cfg/cis-1.24-microk8s/master.yaml | 948 ++++++++++++++++++++++++ cfg/cis-1.24-microk8s/node.yaml | 462 ++++++++++++ cfg/cis-1.24-microk8s/policies.yaml | 269 +++++++ cfg/config.yaml | 7 + docs/architecture.md | 37 +- docs/platforms.md | 1 + 9 files changed, 1875 insertions(+), 18 deletions(-) create mode 100644 cfg/cis-1.24-microk8s/config.yaml create mode 100644 cfg/cis-1.24-microk8s/controlplane.yaml create mode 100644 cfg/cis-1.24-microk8s/etcd.yaml create mode 100644 cfg/cis-1.24-microk8s/master.yaml create mode 100644 cfg/cis-1.24-microk8s/node.yaml create mode 100644 cfg/cis-1.24-microk8s/policies.yaml diff --git a/cfg/cis-1.24-microk8s/config.yaml b/cfg/cis-1.24-microk8s/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.24-microk8s/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.24-microk8s/controlplane.yaml b/cfg/cis-1.24-microk8s/controlplane.yaml new file mode 100644 index 000000000..49c4e0016 --- /dev/null +++ b/cfg/cis-1.24-microk8s/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. 
+ scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.24-microk8s/etcd.yaml b/cfg/cis-1.24-microk8s/etcd.yaml new file mode 100644 index 000000000..b3ba93e37 --- /dev/null +++ b/cfg/cis-1.24-microk8s/etcd.yaml @@ -0,0 +1,121 @@ +--- +controls: +version: "cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Not applicable. MicroK8s uses dqlite and the communication to this service is done through a + local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible + to users with root permissions. + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Not applicable. MicroK8s uses dqlite and the communication to this service is done through a + local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible + to users with root permissions. + scored: false + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Not applicable. MicroK8s uses dqlite and the communication to this service is done through a + local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible + to users with root permissions. + scored: false + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit: "if test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt && test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.key; then echo 'certs-found'; fi" + tests: + test_items: + - flag: "certs-found" + remediation: | + The certificate pair for dqlite and tls peer communication is + /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt and + /var/snap/microk8s/current/var/kubernetes/backend/cluster.key.
+ scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/cat $etcdconf | /bin/grep enable-tls || true" + tests: + bin_op: or + test_items: + - flag: "--enable-tls" + compare: + op: eq + value: true + - flag: "--enable-tls" + set: false + remediation: | + MicroK8s uses dqlite, and tls peer communication uses TLS if the --enable-tls flag is set in + /var/snap/microk8s/current/args/k8s-dqlite, set to true by default. + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Not applicable. MicroK8s uses dqlite and tls peer communication uses the certificates + created upon the snap creation. + scored: false + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + Not applicable. MicroK8s uses dqlite and tls peer communication uses the certificates + created upon the snap creation. + scored: false diff --git a/cfg/cis-1.24-microk8s/master.yaml b/cfg/cis-1.24-microk8s/master.yaml new file mode 100644 index 000000000..d7f537333 --- /dev/null +++ b/cfg/cis-1.24-microk8s/master.yaml @@ -0,0 +1,948 @@ +--- +controls: +version: "cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root + scored: false + + # Etcd is not running on MicroK8s master nodes + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/' + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/snap/microk8s/current/var/kubernetes/backend/ + scored: true + + # Etcd is not running on MicroK8s master nodes + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/' + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "root:root" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown root:root /var/snap/microk8s/current/var/kubernetes/backend/ + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c permissions=%a /var/snap/microk8s/current/credentials/client.config; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/snap/microk8s/current/credentials/client.config + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c %U:%G /var/snap/microk8s/current/credentials/client.config; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/snap/microk8s/current/credentials/client.config + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/snap/microk8s/current/certs/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/snap/microk8s/current/certs/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /var/snap/microk8s/current/certs/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/snap/microk8s/current/certs/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /var/snap/microk8s/current/certs/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/snap/microk8s/current/certs/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + - flag: "--anonymous-auth" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
+ scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. 
+ For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "cat $apiserverconf | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + # MicroK8s does not use etcd. The API server talk to a local dqlite instance + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: false + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. 
+ --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: false + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(cat $apiserverconf | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "cat $controllermanagerconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "cat $schedulerconf | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "cat $schedulerconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/cis-1.24-microk8s/node.yaml b/cfg/cis-1.24-microk8s/node.yaml new file mode 100644 index 000000000..7b9278a54 --- /dev/null +++ b/cfg/cis-1.24-microk8s/node.yaml @@ -0,0 +1,462 @@ +--- +controls: +version: "cis-1.24" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi' " + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi' " + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi' " + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi' " + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi' " + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi' " + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi' " + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: false + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi' " + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: "{.authorization.mode}" + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: "{.authentication.x509.clientCAFile}" + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: "{.readOnlyPort}" + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: "{.readOnlyPort}" + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: "{.streamingConnectionIdleTimeout}" + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: "{.streamingConnectionIdleTimeout}" + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: "{.protectKernelDefaults}" + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: "{.makeIPTablesUtilChains}" + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: "{.makeIPTablesUtilChains}" + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "cat $kubeletconf" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: "{.eventRecordQPS}" + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: "{.tlsCertFile}" + - flag: --tls-private-key-file + path: "{.tlsPrivateKeyFile}" + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: "{.rotateCertificates}" + compare: + op: eq + value: true + - flag: --rotate-certificates + path: "{.rotateCertificates}" + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: "{.featureGates.RotateKubeletServerCertificate}" + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: "{.featureGates.RotateKubeletServerCertificate}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: "{range .tlsCipherSuites[:]}{}{','}{end}" + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/cis-1.24-microk8s/policies.yaml b/cfg/cis-1.24-microk8s/policies.yaml new file mode 100644 index 000000000..3ab3eb4b0 --- /dev/null +++ b/cfg/cis-1.24-microk8s/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. 
+ scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables.
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 8e06b88b8..a99bd6e9d 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -270,6 +270,7 @@ version_mapping: "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" "cis-1.6-k3s": "cis-1.6-k3s" + "cis-1.24-microk8s": "cis-1.24-microk8s" "tkgi-1.2.53": "tkgi-1.2.53" target_mapping: @@ -309,6 +310,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.24-microk8s": + - "master" + - "etcd" + - "node" + - "controlplane" + - "policies" "cis-1.7": - "master" - "node" diff --git a/docs/architecture.md b/docs/architecture.md index a464afb97..ba5a51c6c 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -13,24 +13,25 @@ Check the contents of the benchmark directory under `cfg` to see which targets a The following table shows the valid targets based on the CIS Benchmark version. 
-| CIS Benchmark | Targets | -|---------------|---------| -| cis-1.5 | master, controlplane, node, etcd, policies | -| cis-1.6 | master, controlplane, node, etcd, policies | -| cis-1.20 | master, controlplane, node, etcd, policies | -| cis-1.23 | master, controlplane, node, etcd, policies | -| cis-1.24 | master, controlplane, node, etcd, policies | -| cis-1.7 | master, controlplane, node, etcd, policies | -| gke-1.0 | master, controlplane, node, etcd, policies, managedservices | -| gke-1.2.0 | controlplane, node, policies, managedservices | -| eks-1.0.1 | controlplane, node, policies, managedservices | -| eks-1.1.0 | controlplane, node, policies, managedservices | -| eks-1.2.0 | controlplane, node, policies, managedservices | -| ack-1.0 | master, controlplane, node, etcd, policies, managedservices | -| aks-1.0 | controlplane, node, policies, managedservices | -| rh-0.7 | master,node| -| rh-1.0 | master, controlplane, node, etcd, policies | -| cis-1.6-k3s | master, controlplane, node, etcd, policies | +| CIS Benchmark | Targets | +|----------------------|---------| +| cis-1.5 | master, controlplane, node, etcd, policies | +| cis-1.6 | master, controlplane, node, etcd, policies | +| cis-1.20 | master, controlplane, node, etcd, policies | +| cis-1.23 | master, controlplane, node, etcd, policies | +| cis-1.24 | master, controlplane, node, etcd, policies | +| cis-1.7 | master, controlplane, node, etcd, policies | +| gke-1.0 | master, controlplane, node, etcd, policies, managedservices | +| gke-1.2.0 | controlplane, node, policies, managedservices | +| eks-1.0.1 | controlplane, node, policies, managedservices | +| eks-1.1.0 | controlplane, node, policies, managedservices | +| eks-1.2.0 | controlplane, node, policies, managedservices | +| ack-1.0 | master, controlplane, node, etcd, policies, managedservices | +| aks-1.0 | controlplane, node, policies, managedservices | +| rh-0.7 | master,node| +| rh-1.0 | master, controlplane, node, etcd, policies | +| cis-1.6-k3s | master, controlplane, node, etcd, policies | +| cis-1.24-microk8s | master, controlplane, node, etcd, policies | The following table shows the valid DISA STIG versions diff --git a/docs/platforms.md b/docs/platforms.md index b02223aa0..e0429210b 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -26,5 +26,6 @@ Some defined by other hardenening guides. | RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | | CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | | CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | +| CIS | [1.24-microk8s](https://microk8s.io/docs/how-to-cis-harden) | cis-1.24-microk8s | microk8s v1.28 | | DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | | CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware | From f8fe5ee173ac72f555391623dc81df6708bc7122 Mon Sep 17 00:00:00 2001 From: Kiran Bodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Sun, 26 Nov 2023 15:57:38 +0530 Subject: [PATCH 182/262] Add CIS Benchmarks support to Rancher Distributions RKE/RKE2/K3s (#1523) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. 
logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 * add Support VMware Tanzu(TKGI) Benchmarks v1.2.53 with this change, we are adding 1. latest kubernetes cis benchmarks for VMware Tanzu1.2.53 2. logic to kube-bench so that kube-bench can auto detect vmware platform, will be able to execute the respective vmware tkgi compliance checks. 3. job-tkgi.yaml file to run the benchmark as a job in tkgi cluster Reference Document for checks: https://network.pivotal.io/products/p-compliance-scanner/#/releases/1248397 * release: prepare v0.6.15 (#1455) Signed-off-by: chenk * build(deps): bump golang from 1.19.4 to 1.20.4 (#1436) Bumps golang from 1.19.4 to 1.20.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * build(deps): bump actions/setup-go from 3 to 4 (#1402) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk * Fix test_items in cis-1.7 - node - 4.2.12 (#1469) Related issue: https://github.com/aquasecurity/kube-bench/issues/1468 * Fix node.yaml - 4.1.7 and 4.1.8 audit by adding uniq (#1472) * chore: add fips compliant images (#1473) For fips complaince we need to generate fips compliant images. As part of this change, we will create new kube-bench image which will be fips compliant. Image name follows this tag pattern -ubi-fips * release: prepare v0.6.16-rc (#1476) * release: prepare v0.6.16-rc Signed-off-by: chenk * release: prepare v0.6.16-rc Signed-off-by: chenk --------- Signed-off-by: chenk * release: prepare v0.6.16 official (#1479) Signed-off-by: chenk * Update job.yaml (#1477) * Update job.yaml Fix on typo for image version * chore: sync with upstream Signed-off-by: chenk --------- Signed-off-by: chenk Co-authored-by: chenk * release: prepare v0.6.17 (#1480) Signed-off-by: chenk * Bump docker base images (#1465) During a recent CVE scan we found kube-bench to use `alpine:3.18` as the final image which has a known high CVE. ``` grype aquasec/kube-bench:v0.6.15 ✔ Vulnerability DB [no update available] ✔ Loaded image ✔ Parsed image ✔ Cataloged packages [73 packages] ✔ Scanning image... [4 vulnerabilities] ├── 0 critical, 4 high, 0 medium, 0 low, 0 negligible └── 4 fixed NAME INSTALLED FIXED-IN TYPE VULNERABILITY SEVERITY libcrypto3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High libssl3 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High openssl 3.1.0-r4 3.1.1-r0 apk CVE-2023-2650 High ``` The CVE in question was addressed in the latest [alpine release](https://www.alpinelinux.org/posts/Alpine-3.15.9-3.16.6-3.17.4-3.18.2-released.html), hence updating the dockerfiles accordingly * build(deps): bump golang from 1.20.4 to 1.20.6 (#1475) Bumps golang from 1.20.4 to 1.20.6. 
--- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * Add CIS Benchmarks support to Rancher Distributions RKE/RKE2/K3s Based on the information furnished in https://ranchermanager.docs.rancher.com/v2.7/pages-for-subheaders/rancher-hardening-guides kube-bench executes CIS-1.23 (Kubernetes v1.23) , CIS-1.24(Kubernetes v1.24),CIS-1.7 (Kubernetes v1.25,v1.26,v1.27) CIS Benchmarks of respective distributions. * RKE/RKE2 CIS Benchmarks Updated the order of checks for RKE and RKE2 Platforms. * fixed vulnerabilities|upgraded package golang.org/x/net to version v0.17.0 * Error handling for RKE Detection Pre-requisites * Based on the information furnished in https://ranchermanager.docs.rancher.com/v2.7/pages-for-subheaders/rancher-hardening-guides#hardening-guides-and-benchmark-versions, kube-bench executes CIS-1.23 (Kubernetes v1.23) , CIS-1.24(Kubernetes v1.24),CIS-1.7 (Kubernetes v1.25,v1.26,v1.27) CIS Benchmarks of respective distributions. updated documentation specific to added rancher platforms * addressed review comments 1.Implemented IsRKE functionality in kube-bench 2. Removed containerd from global level config and accommodated in individual config file 3. Corrected the control id from 1.2.25 to 1.2.23 in master.yaml(k3s-cis-1.23 and k3s-cis-1.24) * Removed unncessary dependency - kubernetes-provider-detector --------- Signed-off-by: chenk Signed-off-by: dependabot[bot] Co-authored-by: chenk Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andy Pitcher Co-authored-by: Devendra Turkar Co-authored-by: Guille Vigil Co-authored-by: Jonas-Taha El Sesiy --- cfg/config.yaml | 81 +++ cfg/k3s-cis-1.23/config.yaml | 46 ++ cfg/k3s-cis-1.23/controlplane.yaml | 47 ++ cfg/k3s-cis-1.23/etcd.yaml | 148 ++++ cfg/k3s-cis-1.23/master.yaml | 986 +++++++++++++++++++++++++ cfg/k3s-cis-1.23/node.yaml | 497 +++++++++++++ cfg/k3s-cis-1.23/policies.yaml | 269 +++++++ cfg/k3s-cis-1.24/config.yaml | 46 ++ cfg/k3s-cis-1.24/controlplane.yaml | 46 ++ cfg/k3s-cis-1.24/etcd.yaml | 148 ++++ cfg/k3s-cis-1.24/master.yaml | 992 +++++++++++++++++++++++++ cfg/k3s-cis-1.24/node.yaml | 462 ++++++++++++ cfg/k3s-cis-1.24/policies.yaml | 269 +++++++ cfg/k3s-cis-1.7/config.yaml | 53 ++ cfg/k3s-cis-1.7/controlplane.yaml | 60 ++ cfg/k3s-cis-1.7/etcd.yaml | 148 ++++ cfg/k3s-cis-1.7/master.yaml | 1000 ++++++++++++++++++++++++++ cfg/k3s-cis-1.7/node.yaml | 457 ++++++++++++ cfg/k3s-cis-1.7/policies.yaml | 318 ++++++++ cfg/rke-cis-1.23/config.yaml | 2 + cfg/rke-cis-1.23/controlplane.yaml | 46 ++ cfg/rke-cis-1.23/etcd.yaml | 149 ++++ cfg/rke-cis-1.23/master.yaml | 1011 ++++++++++++++++++++++++++ cfg/rke-cis-1.23/node.yaml | 473 ++++++++++++ cfg/rke-cis-1.23/policies.yaml | 277 +++++++ cfg/rke-cis-1.24/config.yaml | 2 + cfg/rke-cis-1.24/controlplane.yaml | 46 ++ cfg/rke-cis-1.24/etcd.yaml | 149 ++++ cfg/rke-cis-1.24/master.yaml | 1016 ++++++++++++++++++++++++++ cfg/rke-cis-1.24/node.yaml | 464 ++++++++++++ cfg/rke-cis-1.24/policies.yaml | 277 +++++++ cfg/rke-cis-1.7/config.yaml | 2 + cfg/rke-cis-1.7/controlplane.yaml | 60 ++ cfg/rke-cis-1.7/etcd.yaml | 136 ++++ cfg/rke-cis-1.7/master.yaml | 998 +++++++++++++++++++++++++ cfg/rke-cis-1.7/node.yaml | 465 ++++++++++++ cfg/rke-cis-1.7/policies.yaml | 318 ++++++++ cfg/rke2-cis-1.23/config.yaml | 2 + cfg/rke2-cis-1.23/controlplane.yaml | 50 ++ 
cfg/rke2-cis-1.23/etcd.yaml | 150 ++++ cfg/rke2-cis-1.23/master.yaml | 1038 +++++++++++++++++++++++++++ cfg/rke2-cis-1.23/node.yaml | 473 ++++++++++++ cfg/rke2-cis-1.23/policies.yaml | 269 +++++++ cfg/rke2-cis-1.24/config.yaml | 2 + cfg/rke2-cis-1.24/controlplane.yaml | 50 ++ cfg/rke2-cis-1.24/etcd.yaml | 150 ++++ cfg/rke2-cis-1.24/master.yaml | 1038 +++++++++++++++++++++++++++ cfg/rke2-cis-1.24/node.yaml | 469 ++++++++++++ cfg/rke2-cis-1.24/policies.yaml | 269 +++++++ cfg/rke2-cis-1.7/config.yaml | 2 + cfg/rke2-cis-1.7/controlplane.yaml | 62 ++ cfg/rke2-cis-1.7/etcd.yaml | 154 ++++ cfg/rke2-cis-1.7/master.yaml | 1027 ++++++++++++++++++++++++++ cfg/rke2-cis-1.7/node.yaml | 461 ++++++++++++ cfg/rke2-cis-1.7/policies.yaml | 304 ++++++++ cmd/util.go | 92 ++- cmd/util_test.go | 36 + docs/platforms.md | 44 +- docs/running.md | 29 +- go.mod | 35 +- go.sum | 94 ++- 61 files changed, 18233 insertions(+), 31 deletions(-) create mode 100644 cfg/k3s-cis-1.23/config.yaml create mode 100644 cfg/k3s-cis-1.23/controlplane.yaml create mode 100644 cfg/k3s-cis-1.23/etcd.yaml create mode 100644 cfg/k3s-cis-1.23/master.yaml create mode 100644 cfg/k3s-cis-1.23/node.yaml create mode 100644 cfg/k3s-cis-1.23/policies.yaml create mode 100644 cfg/k3s-cis-1.24/config.yaml create mode 100644 cfg/k3s-cis-1.24/controlplane.yaml create mode 100644 cfg/k3s-cis-1.24/etcd.yaml create mode 100644 cfg/k3s-cis-1.24/master.yaml create mode 100644 cfg/k3s-cis-1.24/node.yaml create mode 100644 cfg/k3s-cis-1.24/policies.yaml create mode 100644 cfg/k3s-cis-1.7/config.yaml create mode 100644 cfg/k3s-cis-1.7/controlplane.yaml create mode 100644 cfg/k3s-cis-1.7/etcd.yaml create mode 100644 cfg/k3s-cis-1.7/master.yaml create mode 100644 cfg/k3s-cis-1.7/node.yaml create mode 100644 cfg/k3s-cis-1.7/policies.yaml create mode 100644 cfg/rke-cis-1.23/config.yaml create mode 100644 cfg/rke-cis-1.23/controlplane.yaml create mode 100644 cfg/rke-cis-1.23/etcd.yaml create mode 100644 cfg/rke-cis-1.23/master.yaml create mode 100644 cfg/rke-cis-1.23/node.yaml create mode 100644 cfg/rke-cis-1.23/policies.yaml create mode 100644 cfg/rke-cis-1.24/config.yaml create mode 100644 cfg/rke-cis-1.24/controlplane.yaml create mode 100644 cfg/rke-cis-1.24/etcd.yaml create mode 100644 cfg/rke-cis-1.24/master.yaml create mode 100644 cfg/rke-cis-1.24/node.yaml create mode 100644 cfg/rke-cis-1.24/policies.yaml create mode 100644 cfg/rke-cis-1.7/config.yaml create mode 100644 cfg/rke-cis-1.7/controlplane.yaml create mode 100644 cfg/rke-cis-1.7/etcd.yaml create mode 100644 cfg/rke-cis-1.7/master.yaml create mode 100644 cfg/rke-cis-1.7/node.yaml create mode 100644 cfg/rke-cis-1.7/policies.yaml create mode 100644 cfg/rke2-cis-1.23/config.yaml create mode 100644 cfg/rke2-cis-1.23/controlplane.yaml create mode 100644 cfg/rke2-cis-1.23/etcd.yaml create mode 100644 cfg/rke2-cis-1.23/master.yaml create mode 100644 cfg/rke2-cis-1.23/node.yaml create mode 100644 cfg/rke2-cis-1.23/policies.yaml create mode 100644 cfg/rke2-cis-1.24/config.yaml create mode 100644 cfg/rke2-cis-1.24/controlplane.yaml create mode 100644 cfg/rke2-cis-1.24/etcd.yaml create mode 100644 cfg/rke2-cis-1.24/master.yaml create mode 100644 cfg/rke2-cis-1.24/node.yaml create mode 100644 cfg/rke2-cis-1.24/policies.yaml create mode 100644 cfg/rke2-cis-1.7/config.yaml create mode 100644 cfg/rke2-cis-1.7/controlplane.yaml create mode 100644 cfg/rke2-cis-1.7/etcd.yaml create mode 100644 cfg/rke2-cis-1.7/master.yaml create mode 100644 cfg/rke2-cis-1.7/node.yaml create mode 100644 cfg/rke2-cis-1.7/policies.yaml 
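All of the new benchmark files listed above use the same check grammar as kube-bench's existing configs: each check pairs an `audit` command with one or more `test_items` evaluated against the command's output, `bin_op` controls how multiple items combine, and `type`/`scored` decide whether a failing check counts against the score. As a reading aid for the YAML that follows, here is a minimal sketch of that shape; the check id, audit command, and values are illustrative only and are not part of this patch.

```
---
controls:
version: "k3s-cis-1.23"    # benchmark this file belongs to (cfg/k3s-cis-1.23/)
id: 1
text: "Example Section"
type: "master"
groups:
  - id: 1.1
    text: "Example Group"
    checks:
      - id: 1.1.99         # illustrative id, not a real CIS control
        text: "Ensure that the --profiling argument is set to false (Automated)"
        audit: "/bin/ps -ef | grep kube-apiserver | grep -v grep"
        tests:
          bin_op: or       # pass if either test_item below matches
          test_items:
            - flag: "--profiling"
              compare:
                op: eq
                value: false
            - flag: "--profiling"
              set: false   # in this sketch, an absent flag also counts as compliant
        remediation: |
          Set --profiling=false on the kube-apiserver command line.
        scored: true
```

Checks marked `type: "manual"` or `type: "skip"` are reported but not scored automatically; that is why many of the file-permission checks in the k3s files below are marked `skip`, since k3s runs its control plane components embedded in a single process rather than from static pod manifests.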
diff --git a/cfg/config.yaml b/cfg/config.yaml index a99bd6e9d..fc95ba56e 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -36,6 +36,7 @@ master: - /var/snap/microk8s/current/args/kube-apiserver - /etc/origin/master/master-config.yaml - /etc/kubernetes/manifests/talos-kube-apiserver.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml scheduler: @@ -53,6 +54,7 @@ master: - /var/snap/microk8s/current/args/kube-scheduler - /etc/origin/master/scheduler.json - /etc/kubernetes/manifests/talos-kube-scheduler.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml kubeconfig: - /etc/kubernetes/scheduler.conf @@ -77,6 +79,7 @@ master: - /var/snap/kube-controller-manager/current/args - /var/snap/microk8s/current/args/kube-controller-manager - /etc/kubernetes/manifests/talos-kube-controller-manager.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml kubeconfig: - /etc/kubernetes/controller-manager.conf @@ -101,6 +104,7 @@ master: - /var/snap/etcd/common/etcd.conf.yaml - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service + - /var/lib/rancher/rke2/server/db/etcd/config defaultconf: /etc/kubernetes/manifests/etcd.yaml defaultdatadir: /var/lib/etcd/default.etcd @@ -132,6 +136,9 @@ node: - "/etc/kubernetes/certs/ca.crt" - "/etc/kubernetes/cert/ca.pem" - "/var/snap/microk8s/current/certs/ca.crt" + - "/var/lib/rancher/rke2/agent/server.crt" + - "/var/lib/rancher/rke2/agent/client-ca.crt" + - "/var/lib/rancher/k3s/agent/client-ca.crt" svc: # These paths must also be included # in the 'confs' property below @@ -151,8 +158,12 @@ node: - "/var/lib/kubelet/kubeconfig" - "/etc/kubernetes/kubelet-kubeconfig" - "/etc/kubernetes/kubelet/kubeconfig" + - "/etc/kubernetes/ssl/kubecfg-kube-node.yaml" - "/var/snap/microk8s/current/credentials/kubelet.config" - "/etc/kubernetes/kubeconfig-kubelet" + - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" + - "/var/lib/rancher/k3s/server/cred/admin.kubeconfig" + - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" confs: - "/etc/kubernetes/kubelet-config.yaml" - "/var/lib/kubelet/config.yaml" @@ -177,6 +188,8 @@ node: - "/etc/systemd/system/snap.kubelet.daemon.service" - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" - "/etc/kubernetes/kubelet.yaml" + - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" + defaultconf: "/var/lib/kubelet/config.yaml" defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" defaultkubeconfig: "/etc/kubernetes/kubelet.conf" @@ -200,8 +213,11 @@ node: - "/etc/kubernetes/kubelet-kubeconfig" - "/etc/kubernetes/kubelet-kubeconfig.conf" - "/etc/kubernetes/kubelet/config" + - "/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml" - "/var/lib/kubelet/kubeconfig" - "/var/snap/microk8s/current/credentials/proxy.config" + - "/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig" + - "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" svc: - "/lib/systemd/system/kube-proxy.service" - "/etc/systemd/system/snap.microk8s.daemon-proxy.service" @@ -227,6 +243,8 @@ etcd: - /var/snap/etcd/common/etcd.conf.yaml - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service + - /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + - /var/lib/rancher/k3s/server/db/etcd/config defaultconf: /etc/kubernetes/manifests/etcd.yaml defaultdatadir: /var/lib/etcd/default.etcd @@ -272,6 
+290,15 @@ version_mapping: "cis-1.6-k3s": "cis-1.6-k3s" "cis-1.24-microk8s": "cis-1.24-microk8s" "tkgi-1.2.53": "tkgi-1.2.53" + "k3s-cis-1.7": "k3s-cis-1.7" + "k3s-cis-1.23": "k3s-cis-1.23" + "k3s-cis-1.24": "k3s-cis-1.24" + "rke-cis-1.7": "rke-cis-1.7" + "rke-cis-1.23": "rke-cis-1.23" + "rke-cis-1.24": "rke-cis-1.24" + "rke2-cis-1.7": "rke2-cis-1.7" + "rke2-cis-1.23": "rke2-cis-1.23" + "rke2-cis-1.24": "rke2-cis-1.24" target_mapping: "cis-1.5": @@ -386,3 +413,57 @@ target_mapping: - "controlplane" - "node" - "policies" + "k3s-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.24": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke-cis-1.24": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.24": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" diff --git a/cfg/k3s-cis-1.23/config.yaml b/cfg/k3s-cis-1.23/config.yaml new file mode 100644 index 000000000..32033c099 --- /dev/null +++ b/cfg/k3s-cis-1.23/config.yaml @@ -0,0 +1,46 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + scheduler: + bins: + - containerd + + controllermanager: + bins: + - containerd + + etcd: + bins: + - containerd + + node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + + policies: + components: + - policies diff --git a/cfg/k3s-cis-1.23/controlplane.yaml b/cfg/k3s-cis-1.23/controlplane.yaml new file mode 100644 index 000000000..1aec579e5 --- /dev/null +++ b/cfg/k3s-cis-1.23/controlplane.yaml @@ -0,0 +1,47 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + type: "manual" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. 
Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/k3s-cis-1.23/etcd.yaml b/cfg/k3s-cis-1.23/etcd.yaml new file mode 100644 index 000000000..1bbb60d43 --- /dev/null +++ b/cfg/k3s-cis-1.23/etcd.yaml @@ -0,0 +1,148 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.1" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.2" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.3" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.4" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.5" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.6" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "check_for_k3s_etcd.sh 2.7" + tests: + test_items: + - flag: "trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/k3s-cis-1.23/master.yaml b/cfg/k3s-cis-1.23/master.yaml new file mode 100644 index 000000000..08d7e7485 --- /dev/null +++ b/cfg/k3s-cis-1.23/master.yaml @@ -0,0 +1,986 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "check_for_k3s_etcd.sh 1.1.11" + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'" + type: "skip" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
+ scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + type: "skip" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... 
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. 
+ scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. 
+ For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 1.2.29" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. 
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the etcd certificate authority file parameter.
+ --etcd-cafile=
+ scored: true
+
+ - id: 1.2.30
+ text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+ audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+ tests:
+ test_items:
+ - flag: "--encryption-provider-config"
+ remediation: |
+ Follow the Kubernetes documentation and configure an EncryptionConfig file.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+ For example, --encryption-provider-config=
+ scored: false
+
+ - id: 1.2.31
+ text: "Ensure that encryption providers are appropriately configured (Manual)"
+ audit: "grep aescbc /path/to/encryption-config.json"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and configure an EncryptionConfig file.
+ In this file, choose aescbc, kms or secretbox as the encryption provider.
+ scored: false
+
+ - id: 1.2.32
+ text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+ audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
+ tests:
+ test_items:
+ - flag: "--tls-cipher-suites"
+ compare:
+ op: valid_elements
+ value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+ remediation: |
+ Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+ on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file=
+ scored: true
+
+ - id: 1.3.6
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+ audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
+ type: "skip"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--feature-gates"
+ compare:
+ op: nothave
+ value: "RotateKubeletServerCertificate=false"
+ set: true
+ - flag: "--feature-gates"
+ set: false
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+ --feature-gates=RotateKubeletServerCertificate=true
+ scored: true
+
+ - id: 1.3.7
+ text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+ audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--bind-address"
+ compare:
+ op: eq
+ value: "127.0.0.1"
+ set: true
+ - flag: "--bind-address"
+ set: false
+ remediation: |
+ Edit the Controller Manager pod specification file $controllermanagerconf
+ on the control plane node and ensure the correct value for the --bind-address parameter
+ scored: true
+
+ - id: 1.4
+ text: "Scheduler"
+ checks:
+ - id: 1.4.1
+ text: "Ensure that the --profiling argument is set to false (Automated)"
+ audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1"
+ tests:
+ test_items:
+ - flag: "--profiling"
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ Edit the Scheduler pod specification file $schedulerconf
+ on the control plane node and set the below parameter.
+ --profiling=false
+ scored: true
+
+ - id: 1.4.2
+ text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+ audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--bind-address"
+ compare:
+ op: eq
+ value: "127.0.0.1"
+ set: true
+ - flag: "--bind-address"
+ set: false
+ remediation: |
+ Edit the Scheduler pod specification file $schedulerconf
+ on the control plane node and ensure the correct value for the --bind-address parameter
+ scored: true
diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml
new file mode 100644
index 000000000..211cdbc71
--- /dev/null
+++ b/cfg/k3s-cis-1.23/node.yaml
@@ -0,0 +1,497 @@
+---
+controls:
+version: "k3s-cis-1.23"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+ - id: 4.1
+ text: "Worker Node Configuration Files"
+ checks:
+ - id: 4.1.1
+ text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example, chmod 644 $kubeletsvc
+ scored: true
+
+ - id: 4.1.2
+ text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chown root:root $kubeletsvc
+ scored: true
+
+ - id: 4.1.3
+ text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+ audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "permissions"
+ set: true
+ compare:
+ op: bitmask
+ value: "644"
+ - flag: "$proxykubeconfig"
+ set: false
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 644 $proxykubeconfig
+ scored: false
+
+ - id: 4.1.4
+ text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+ audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
+ tests:
+ bin_op: or
+ test_items:
+ - flag: root:root
+ - flag: "$proxykubeconfig"
+ set: false
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example, chown root:root $proxykubeconfig
+ scored: false
+
+ - id: 4.1.5
+ text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
+ audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig '
+ tests:
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 644 $kubeletkubeconfig
+ scored: true
+
+ - id: 4.1.6
+ text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+ audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig'
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chown root:root $kubeletkubeconfig
+ scored: true
+
+ - id: 4.1.7
+ text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
+ audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt"
+ tests:
+ test_items:
+ - flag: "644"
+ compare:
+ op: eq
+ value: "644"
+ set: true
+ - flag: "640"
+ compare:
+ op: eq
+ value: "640"
+ set: true
+ - flag: "600"
+ compare:
+ op: eq
+ value: "600"
+ set: true
+ - flag: "444"
+ compare:
+ op: eq
+ value: "444"
+ set: true
+ - flag: "440"
+ compare:
+ op: eq
+ value: "440"
+ set: true
+ - flag: "400"
+ compare:
+ op: eq
+ value: "400"
+ set: true
+ - flag: "000"
+ compare:
+ op: eq
+ value: "000"
+ set: true
+ bin_op: or
+ remediation: |
+ Run the following command to modify the file permissions of the
+ --client-ca-file chmod 644
+ scored: true
+
+ - id: 4.1.8
+ text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+ audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt"
+ tests:
+ test_items:
+ - flag: root:root
+ compare:
+ op: eq
+ value: root:root
+ remediation: |
+ Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" + type: "skip" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. 
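+ As an illustrative sketch only, the corresponding entry in a kubelet config file would be a single key:
+ protectKernelDefaults: true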
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + type: "skip" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + set: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "manual" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. 
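+ As an illustrative sketch only (reusing the default k3s paths referenced by the audit above), the
+ config file entries could look like:
+ tlsCertFile: /var/lib/rancher/k3s/agent/serving-kubelet.crt
+ tlsPrivateKeyFile: /var/lib/rancher/k3s/agent/serving-kubelet.key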
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + type: "skip" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "skip" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "manual" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/k3s-cis-1.23/policies.yaml b/cfg/k3s-cis-1.23/policies.yaml new file mode 100644 index 000000000..7faffa3ee --- /dev/null +++ b/cfg/k3s-cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. 
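+ As an illustrative sketch only, Pod Security Admission can be enabled for a namespace by
+ labelling it, for example:
+ kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=baseline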
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of privileged containers (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostPID` containers.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostIPC` containers.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostNetwork` containers.
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Automated)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.2.11
+ text: "Minimize the admission of Windows HostProcess containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+ scored: false
+
+ - id: 5.2.12
+ text: "Minimize the admission of HostPath volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `hostPath` volumes.
+ scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
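+ As an illustrative sketch only, a dedicated namespace can be created and made the default for
+ the current kubectl context with:
+ kubectl create namespace <namespace>
+ kubectl config set-context --current --namespace=<namespace>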
+ scored: false diff --git a/cfg/k3s-cis-1.24/config.yaml b/cfg/k3s-cis-1.24/config.yaml new file mode 100644 index 000000000..32033c099 --- /dev/null +++ b/cfg/k3s-cis-1.24/config.yaml @@ -0,0 +1,46 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + scheduler: + bins: + - containerd + + controllermanager: + bins: + - containerd + + etcd: + bins: + - containerd + + node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + + policies: + components: + - policies diff --git a/cfg/k3s-cis-1.24/controlplane.yaml b/cfg/k3s-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..fd9766efb --- /dev/null +++ b/cfg/k3s-cis-1.24/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/k3s-cis-1.24/etcd.yaml b/cfg/k3s-cis-1.24/etcd.yaml new file mode 100644 index 000000000..d797f56c7 --- /dev/null +++ b/cfg/k3s-cis-1.24/etcd.yaml @@ -0,0 +1,148 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.1" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. 
+ --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.2" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.3" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.4" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.5" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.6" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "check_for_k3s_etcd.sh 2.7" + tests: + test_items: + - flag: "trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. 
+ --trusted-ca-file= + scored: false diff --git a/cfg/k3s-cis-1.24/master.yaml b/cfg/k3s-cis-1.24/master.yaml new file mode 100644 index 000000000..ce57bc871 --- /dev/null +++ b/cfg/k3s-cis-1.24/master.yaml @@ -0,0 +1,992 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "check_for_k3s_etcd.sh 1.1.11" + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
+ For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'" + type: "skip" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + type: "skip" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. 
+ scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
+ scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
+ scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 1.2.29" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. 
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Automated)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
+        type: "skip"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml
new file mode 100644
index 000000000..3b9509103
--- /dev/null
+++ b/cfg/k3s-cis-1.24/node.yaml
@@ -0,0 +1,462 @@
+---
+controls:
+version: "k3s-cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig '
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig'
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 600 
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" + type: "skip" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. 
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + type: "skip" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + set: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. 
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/k3s-cis-1.24/policies.yaml b/cfg/k3s-cis-1.24/policies.yaml new file mode 100644 index 000000000..33890fd57 --- /dev/null +++ b/cfg/k3s-cis-1.24/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. 
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+ scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
+ scored: false diff --git a/cfg/k3s-cis-1.7/config.yaml b/cfg/k3s-cis-1.7/config.yaml new file mode 100644 index 000000000..816af1e8e --- /dev/null +++ b/cfg/k3s-cis-1.7/config.yaml @@ -0,0 +1,53 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - kubelet + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + scheduler: + bins: + - containerd + + controllermanager: + bins: + - containerd + + etcd: + bins: + - containerd + + node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + + policies: + components: + - policies diff --git a/cfg/k3s-cis-1.7/controlplane.yaml b/cfg/k3s-cis-1.7/controlplane.yaml new file mode 100644 index 000000000..fa63febda --- /dev/null +++ b/cfg/k3s-cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
+ scored: false diff --git a/cfg/k3s-cis-1.7/etcd.yaml b/cfg/k3s-cis-1.7/etcd.yaml new file mode 100644 index 000000000..1535ea606 --- /dev/null +++ b/cfg/k3s-cis-1.7/etcd.yaml @@ -0,0 +1,148 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.1" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.2" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.3" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 2.4" + tests: + bin_op: and + test_items: + - flag: "cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.5" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "check_for_k3s_etcd.sh 2.6" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. 
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "check_for_k3s_etcd.sh 2.7" + tests: + test_items: + - flag: "trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/k3s-cis-1.7/master.yaml b/cfg/k3s-cis-1.7/master.yaml new file mode 100644 index 000000000..8c59d61e6 --- /dev/null +++ b/cfg/k3s-cis-1.7/master.yaml @@ -0,0 +1,1000 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + Not Applicable. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + Not Applicable. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + Not Applicable. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + Not Applicable. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + Not Applicable. 
+ scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + Not Applicable. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + Not Applicable. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + Not Applicable. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + Not Applicable. + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + type: skip + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + Not Applicable. + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "check_for_k3s_etcd.sh 1.1.11" + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
For example,
+        chmod 700 /var/lib/etcd
+      scored: true
+
+    - id: 1.1.12
+      text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
+      audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+      type: "skip"
+      tests:
+        test_items:
+          - flag: "etcd:etcd"
+      remediation: |
+        On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+        from the command 'ps -ef | grep etcd'.
+        Run the below command (based on the etcd data directory found above).
+        For example, chown etcd:etcd /var/lib/etcd
+        Not Applicable.
+      scored: true
+
+    - id: 1.1.13
+      text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
+      audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
+      type: "skip"
+      tests:
+        test_items:
+          - flag: "permissions"
+            compare:
+              op: bitmask
+              value: "600"
+      remediation: |
+        Run the below command (based on the file location on your system) on the control plane node.
+        For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig
+      scored: true
+
+    - id: 1.1.14
+      text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
+      audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
+      tests:
+        test_items:
+          - flag: "root:root"
+            compare:
+              op: eq
+              value: "root:root"
+            set: true
+      remediation: |
+        Run the below command (based on the file location on your system) on the control plane node.
+        For example, chown root:root /etc/kubernetes/admin.conf
+      scored: true
+
+    - id: 1.1.15
+      text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
+      audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'"
+      tests:
+        test_items:
+          - flag: "permissions"
+            compare:
+              op: bitmask
+              value: "600"
+      remediation: |
+        Run the below command (based on the file location on your system) on the control plane node.
+        For example,
+        chmod 600 $schedulerkubeconfig
+      scored: true
+
+    - id: 1.1.16
+      text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
+      audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'"
+      tests:
+        test_items:
+          - flag: "root:root"
+      remediation: |
+        Run the below command (based on the file location on your system) on the control plane node.
+        For example,
+        chown root:root $schedulerkubeconfig
+      scored: true
+
+    - id: 1.1.17
+      text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
+      audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'"
+      tests:
+        test_items:
+          - flag: "permissions"
+            compare:
+              op: bitmask
+              value: "600"
+      remediation: |
+        Run the below command (based on the file location on your system) on the control plane node.
+ For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. 
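# [Editor's note — explanatory comment, not part of the committed file.]
# Reading the test above: with `bin_op: or` the check passes when either test_item
# passes, i.e. --enable-admission-plugins is present but does not contain
# DenyServiceExternalIPs (op: nothave), or the flag is not set at all (set: false).
# The same "compliant value OR flag absent" pattern recurs throughout this file.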
+ scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
+ scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + Permissive. 
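# [Editor's note — illustrative sketch, not part of the committed file.]
# Checks 1.2.18-1.2.21 are skipped for k3s, but audit logging can still be enabled
# by passing the flags through to the embedded apiserver, for example in
# /etc/rancher/k3s/config.yaml (paths and values are examples only):
#   kube-apiserver-arg:
#     - "audit-policy-file=/var/lib/rancher/k3s/server/audit-policy.yaml"
#     - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
#     - "audit-log-maxage=30"
#     - "audit-log-maxbackup=10"
#     - "audit-log-maxsize=100"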
+ scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + Permissive. + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + Permissive. + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + Permissive. + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + Permissive. + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "check_for_k3s_etcd.sh 1.2.29" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + type: "skip" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + Permissive - Enabling encryption changes how data can be recovered as data is encrypted. 
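# [Editor's note — illustrative sketch, not part of the committed file.]
# k3s can enable secrets encryption at rest without hand-writing an
# EncryptionConfiguration, e.g. by starting the server with --secrets-encryption
# or setting the following in /etc/rancher/k3s/config.yaml:
#   secrets-encryption: true
# A hand-rolled configuration for the generic flag would follow the usual shape:
#   apiVersion: apiserver.config.k8s.io/v1
#   kind: EncryptionConfiguration
#   resources:
#     - resources: ["secrets"]
#       providers:
#         - aescbc:
#             keys:
#               - name: key1
#                 secret: <base64-encoded 32-byte key>
#         - identity: {}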
+ scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Permissive - Enabling encryption changes how data can be recovered as data is encrypted. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "skip" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + Not Applicable. + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml new file mode 100644 index 000000000..9941e6cfb --- /dev/null +++ b/cfg/k3s-cis-1.7/node.yaml @@ -0,0 +1,457 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + Not Applicable - All configuration is passed in as arguments at container run time. 
+ scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc + Not Applicable. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + Not Applicable. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + type: "skip" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Permissive. + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable. + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. 
+ --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable. + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false diff --git a/cfg/k3s-cis-1.7/policies.yaml b/cfg/k3s-cis-1.7/policies.yaml new file mode 100644 index 000000000..3b1d1ef17 --- /dev/null +++ b/cfg/k3s-cis-1.7/policies.yaml @@ -0,0 +1,318 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "skip" + audit: check_for_default_sa.sh + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + Permissive - Kubernetes provides default service accounts to be used. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. 
+ scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. 
+        scored: true
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "skip"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+          Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+ scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "skip" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + Permissive - Kubernetes provides a default namespace. + scored: false diff --git a/cfg/rke-cis-1.23/config.yaml b/cfg/rke-cis-1.23/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.23/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.23/controlplane.yaml b/cfg/rke-cis-1.23/controlplane.yaml new file mode 100644 index 000000000..4dab24671 --- /dev/null +++ b/cfg/rke-cis-1.23/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "rke-cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
+        scored: false
diff --git a/cfg/rke-cis-1.23/etcd.yaml b/cfg/rke-cis-1.23/etcd.yaml
new file mode 100644
index 000000000..f0325e02f
--- /dev/null
+++ b/cfg/rke-cis-1.23/etcd.yaml
@@ -0,0 +1,149 @@
+---
+controls:
+version: "rke-cis-1.23"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+              set: true
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=
+          --key-file=
+        scored: true
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--client-cert-auth"
+              set: true
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+              set: true
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=
+          --peer-key-file=
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              set: true
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: true diff --git a/cfg/rke-cis-1.23/master.yaml b/cfg/rke-cis-1.23/master.yaml new file mode 100644 index 000000000..bc2338215 --- /dev/null +++ b/cfg/rke-cis-1.23/master.yaml @@ -0,0 +1,1011 @@ +--- +controls: +version: "rke-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Clusters provisioned by RKE do not require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. 
+ All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: stat -c %a /node/var/lib/etcd + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + A cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. 
+ scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl" + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. 
+ scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
+ scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + set: true + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. 
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Enabling encryption changes how data can be recovered as data is encrypted. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + Cluster provisioned by RKE handles certificate rotation directly through RKE. + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml new file mode 100644 index 000000000..12d3679f1 --- /dev/null +++ b/cfg/rke-cis-1.23/node.yaml @@ -0,0 +1,473 @@ +--- +controls: +version: "rke-cis-1.23" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Cluster provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. 
+ All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $proxykubeconfig + scored: true + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e /node$kubeletkubeconfig; then stat -c permissions=%a /node$kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e /node$kubeletkubeconfig; then stat -c %U:%G /node$kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + + Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. 
+ scored: true
+
+ - id: 4.1.10
+ text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
+ type: "skip"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet.
+ All configuration is passed in as arguments at container run time.
+ scored: true
+
+ - id: 4.2
+ text: "Kubelet"
+ checks:
+ - id: 4.2.1
+ text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+ `false`.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ `--anonymous-auth=false`
+ Based on your system, restart the kubelet service. For example,
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.2
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
+ using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --authorization-mode=Webhook
+ Based on your system, restart the kubelet service. For example,
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.3
+ text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+ tests:
+ test_items:
+ - flag: --client-ca-file
+ path: '{.authentication.x509.clientCAFile}'
+ remediation: |
+ If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
+ the location of the client CA file.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --client-ca-file=
+ Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + System level configurations are required prior to provisioning the cluster in order for this argument to be set to true. + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. 
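+ As an illustrative example, this corresponds to adding the following line to the config file:
+ makeIPTablesUtilChains: true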
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. 
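+ As an illustrative example (the paths below are placeholders), the corresponding Kubelet
+ config file entries would be:
+ tlsCertFile: /etc/kubernetes/ssl/kubelet.pem
+ tlsPrivateKeyFile: /etc/kubernetes/ssl/kubelet-key.pem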
+ scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE handles certificate rotation directly through RKE. + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. 
+ --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true diff --git a/cfg/rke-cis-1.23/policies.yaml b/cfg/rke-cis-1.23/policies.yaml new file mode 100644 index 000000000..611d2c82c --- /dev/null +++ b/cfg/rke-cis-1.23/policies.yaml @@ -0,0 +1,277 @@ +--- +controls: +version: "rke-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Automated)" + type: "skip" + audit: check_for_default_sa.sh + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. 
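+ As an illustrative example (the namespace name is a placeholder), Pod Security Admission
+ can be enabled for a namespace by applying the standard pod-security labels, for instance:
+ kubectl label namespace my-namespace pod-security.kubernetes.io/enforce=restricted
+ kubectl label namespace my-namespace pod-security.kubernetes.io/warn=restricted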
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of privileged containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+ type: "skip"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostPID` containers.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "skip"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostIPC` containers.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "skip"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostNetwork` containers.
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Manual)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.2.11
+ text: "Minimize the admission of Windows HostProcess containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+ scored: false
+
+ - id: 5.2.12
+ text: "Minimize the admission of HostPath volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `hostPath` volumes.
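+ As an illustrative example (the namespace name is a placeholder), enforcing at least the
+ `baseline` Pod Security Standard on a namespace rejects pods that mount hostPath volumes:
+ kubectl label namespace my-namespace pod-security.kubernetes.io/enforce=baseline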
+ scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "skip" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "skip" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
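+ As an illustrative example (the namespace and manifest names are placeholders), create and
+ use a dedicated namespace instead of the default namespace:
+ kubectl create namespace my-app
+ kubectl -n my-app apply -f my-app.yaml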
+ scored: false diff --git a/cfg/rke-cis-1.24/config.yaml b/cfg/rke-cis-1.24/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.24/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.24/controlplane.yaml b/cfg/rke-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..5cd375933 --- /dev/null +++ b/cfg/rke-cis-1.24/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "rke-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke-cis-1.24/etcd.yaml b/cfg/rke-cis-1.24/etcd.yaml new file mode 100644 index 000000000..835e43c41 --- /dev/null +++ b/cfg/rke-cis-1.24/etcd.yaml @@ -0,0 +1,149 @@ +--- +controls: +version: "rke-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "--key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
+ --client-cert-auth="true"
+ scored: true
+
+ - id: 2.3
+ text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--auto-tls"
+ env: "ETCD_AUTO_TLS"
+ set: false
+ - flag: "--auto-tls"
+ env: "ETCD_AUTO_TLS"
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --auto-tls parameter or set it to false.
+ --auto-tls=false
+ scored: true
+
+ - id: 2.4
+ text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+ set as appropriate (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--peer-cert-file"
+ env: "ETCD_PEER_CERT_FILE"
+ set: true
+ - flag: "--peer-key-file"
+ env: "ETCD_PEER_KEY_FILE"
+ set: true
+ remediation: |
+ Follow the etcd service documentation and configure peer TLS encryption as appropriate
+ for your etcd cluster.
+ Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameters.
+ --peer-cert-file=
+ --peer-key-file=
+ scored: true
+
+ - id: 2.5
+ text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--peer-client-cert-auth"
+ set: true
+ - flag: "--peer-client-cert-auth"
+ env: "ETCD_PEER_CLIENT_CERT_AUTH"
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and set the below parameter.
+ --peer-client-cert-auth=true
+ scored: true
+
+ - id: 2.6
+ text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--peer-auto-tls"
+ env: "ETCD_PEER_AUTO_TLS"
+ set: false
+ - flag: "--peer-auto-tls"
+ env: "ETCD_PEER_AUTO_TLS"
+ compare:
+ op: eq
+ value: false
+ set: false
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false
+ scored: true
+
+ - id: 2.7
+ text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ test_items:
+ - flag: "--trusted-ca-file"
+ env: "ETCD_TRUSTED_CA_FILE"
+ set: true
+ remediation: |
+ [Manual test]
+ Follow the etcd documentation and create a dedicated certificate authority setup for the
+ etcd service.
+ Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameter.
+ --trusted-ca-file= + scored: true diff --git a/cfg/rke-cis-1.24/master.yaml b/cfg/rke-cis-1.24/master.yaml new file mode 100644 index 000000000..394cff4e5 --- /dev/null +++ b/cfg/rke-cis-1.24/master.yaml @@ -0,0 +1,1016 @@ +--- +controls: +version: "rke-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: stat -c %a /node/var/lib/etcd + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
+ For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl" + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} + + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /node/etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} + + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. 
+ --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. 
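+ As an illustrative example (file names and limits are placeholders), the admission
+ configuration file could reference an EventRateLimit configuration such as:
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: AdmissionConfiguration
+ plugins:
+ - name: EventRateLimit
+ path: eventconfig.yaml
+ where eventconfig.yaml contains, for example:
+ apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+ kind: Configuration
+ limits:
+ - type: Server
+ qps: 5000
+ burst: 20000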
+ Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
+ scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. 
For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. 
+ --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + set: true + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Enabling encryption changes how data can be recovered as data is encrypted. 
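+ As an illustrative example (the key name and key material are placeholders), an
+ EncryptionConfiguration using the aescbc provider could look like:
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: EncryptionConfiguration
+ resources:
+ - resources: ["secrets"]
+ providers:
+ - aescbc:
+ keys:
+ - name: key1
+ secret: base64-encoded-32-byte-key
+ - identity: {}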
+ scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true +
+ - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file. + --root-ca-file= + scored: true +
+ - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + Cluster provisioned by RKE handles certificate rotation directly through RKE. + scored: true +
+ - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true +
+ - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml new file mode 100644 index 000000000..ce7762baa --- /dev/null +++ b/cfg/rke-cis-1.24/node.yaml @@ -0,0 +1,464 @@ +--- +controls: +version: "rke-cis-1.24" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks:
+ - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true +
+ - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletsvc + scored: true +
+ - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: true +
+ - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: true +
+ - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true +
+ - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: true +
+ - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: true +
+ - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + scored: true +
+ - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + scored: true +
+ - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + System level configurations are required prior to provisioning the cluster in order for this argument to be set to true. + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + scored: false + + - id: 4.2.9 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE handles certificate rotation directly through RKE. 
+ scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true diff --git a/cfg/rke-cis-1.24/policies.yaml b/cfg/rke-cis-1.24/policies.yaml new file mode 100644 index 000000000..7a8b6f53b --- /dev/null +++ b/cfg/rke-cis-1.24/policies.yaml @@ -0,0 +1,277 @@ +--- +controls: +version: "rke-cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. 
(Manual)" + type: "skip" + audit: check_for_default_sa.sh + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. 
+ scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false +
+ - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false +
+ - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false +
+ - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false +
+ - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false +
+ - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false +
+ - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "skip" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false +
+ - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false +
+ - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false +
+ - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false +
+ - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them.
+ scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "skip" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rke-cis-1.7/config.yaml b/cfg/rke-cis-1.7/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.7/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.7/controlplane.yaml b/cfg/rke-cis-1.7/controlplane.yaml new file mode 100644 index 000000000..f3bf99172 --- /dev/null +++ b/cfg/rke-cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "rke-cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
+ scored: false diff --git a/cfg/rke-cis-1.7/etcd.yaml b/cfg/rke-cis-1.7/etcd.yaml new file mode 100644 index 000000000..636ca906d --- /dev/null +++ b/cfg/rke-cis-1.7/etcd.yaml @@ -0,0 +1,136 @@ +--- +controls: +version: "rke-cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks:
+ - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true +
+ - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true +
+ - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true +
+ - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true +
+ - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true +
+ - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: true diff --git a/cfg/rke-cis-1.7/master.yaml b/cfg/rke-cis-1.7/master.yaml new file mode 100644 index 000000000..2fceed30b --- /dev/null +++ b/cfg/rke-cis-1.7/master.yaml @@ -0,0 +1,998 @@ +--- +controls: +version: "rke-cis-1.7" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. 
+ All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: stat -c %a /node/var/lib/etcd + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + Permissive - A system service account is required for etcd data directory ownership. + Refer to Rancher's hardening guide for more details on how to configure this ownership. + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl" + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*.pem' ! 
-name '*key.pem' -exec chmod -R 600 {} + + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /node/etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} + + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. 
+ scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... 
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          SecurityContextDeny, unless PodSecurityPolicy is already in place.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+ scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
+ scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + Permissive - Enabling encryption changes how data can be recovered as data is encrypted. 
+ scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Permissive - Enabling encryption changes how data can be recovered as data is encrypted. + scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + Cluster provisioned by RKE handles certificate rotation directly through RKE. + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke-cis-1.7/node.yaml b/cfg/rke-cis-1.7/node.yaml new file mode 100644 index 000000000..8c9ec4c27 --- /dev/null +++ b/cfg/rke-cis-1.7/node.yaml @@ -0,0 +1,465 @@ +--- +controls: +version: "rke-cis-1.7" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + Not Applicable - Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet service. + All configuration is passed in as arguments at container run time. 
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+          Not Applicable - Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 600
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: true + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + Not Applicable - Clusters provisioned by RKE doesn’t require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. 
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. 
+ type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable - Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable - Clusters provisioned by RKE handles certificate rotation directly through RKE. + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. 
+ scored: false diff --git a/cfg/rke-cis-1.7/policies.yaml b/cfg/rke-cis-1.7/policies.yaml new file mode 100644 index 000000000..a543875c3 --- /dev/null +++ b/cfg/rke-cis-1.7/policies.yaml @@ -0,0 +1,318 @@ +--- +controls: +version: "rke-cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "skip" + audit: check_for_default_sa.sh + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + Permissive - Kubernetes provides default service accounts to be used. + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. 
+ scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "skip"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+          Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "skip" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + Permissive - Kubernetes provides a default namespace. + scored: false diff --git a/cfg/rke2-cis-1.23/config.yaml b/cfg/rke2-cis-1.23/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke2-cis-1.23/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke2-cis-1.23/controlplane.yaml b/cfg/rke2-cis-1.23/controlplane.yaml new file mode 100644 index 000000000..95bccaf36 --- /dev/null +++ b/cfg/rke2-cis-1.23/controlplane.yaml @@ -0,0 +1,50 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file" + type: "skip" + tests: + test_items: + - flag: "--audit-policy-file" + compare: + op: eq + value: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
+        scored: false
diff --git a/cfg/rke2-cis-1.23/etcd.yaml b/cfg/rke2-cis-1.23/etcd.yaml
new file mode 100644
index 000000000..251bcd8c4
--- /dev/null
+++ b/cfg/rke2-cis-1.23/etcd.yaml
@@ -0,0 +1,150 @@
+---
+controls:
+version: "rke2-cis-1.23"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=
+          --key-file=
+        scored: true
+        type: "skip"
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        type: "skip"
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+              set: true
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=
+          --peer-key-file=
+        scored: true
+        type: "skip"
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + type: "skip" + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config" + tests: + bin_op: or + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/rke2-cis-1.23/master.yaml b/cfg/rke2-cis-1.23/master.yaml new file mode 100644 index 000000000..aeb766ab2 --- /dev/null +++ b/cfg/rke2-cis-1.23/master.yaml @@ -0,0 +1,1038 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
+ scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. 
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + type: skip + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + type: skip + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml new file mode 100644 index 000000000..c05fb391e --- /dev/null +++ b/cfg/rke2-cis-1.23/node.yaml @@ -0,0 +1,473 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 644 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + set: true + compare: + op: eq + value: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" + audit: "check_cafile_permissions.sh" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "check_cafile_ownership.sh" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + type: skip + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/rke2-cis-1.23/policies.yaml b/cfg/rke2-cis-1.23/policies.yaml new file mode 100644 index 000000000..87db5aa50 --- /dev/null +++ b/cfg/rke2-cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. 
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rke2-cis-1.24/config.yaml b/cfg/rke2-cis-1.24/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke2-cis-1.24/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke2-cis-1.24/controlplane.yaml b/cfg/rke2-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..9dc031778 --- /dev/null +++ b/cfg/rke2-cis-1.24/controlplane.yaml @@ -0,0 +1,50 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file" + type: "skip" + tests: + test_items: + - flag: "--audit-policy-file" + compare: + op: eq + value: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. 
Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke2-cis-1.24/etcd.yaml b/cfg/rke2-cis-1.24/etcd.yaml new file mode 100644 index 000000000..1e62ff433 --- /dev/null +++ b/cfg/rke2-cis-1.24/etcd.yaml @@ -0,0 +1,150 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + type: "skip" + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + type: "skip" + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + type: "skip" + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
+ --peer-client-cert-auth=true + scored: true + type: "skip" + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config" + tests: + bin_op: or + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/rke2-cis-1.24/master.yaml b/cfg/rke2-cis-1.24/master.yaml new file mode 100644 index 000000000..13afa29ea --- /dev/null +++ b/cfg/rke2-cis-1.24/master.yaml @@ -0,0 +1,1038 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
+ scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. 
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + type: skip + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+        type: skip
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml
new file mode 100644
index 000000000..bfbc24d44
--- /dev/null
+++ b/cfg/rke2-cis-1.24/node.yaml
@@ -0,0 +1,469 @@
+---
+controls:
+version: "rke2-cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: "check_cafile_permissions.sh"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 600
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: "check_cafile_ownership.sh"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + type: skip + + - id: 4.2.9 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/rke2-cis-1.24/policies.yaml b/cfg/rke2-cis-1.24/policies.yaml new file mode 100644 index 000000000..2613f850d --- /dev/null +++ b/cfg/rke2-cis-1.24/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. 
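+ One illustrative option, assuming Pod Security Admission is the policy mechanism in use, is to
+ label the namespace so that at least the baseline profile is enforced, which rejects
+ hostNetwork pods:
+ kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=baseline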
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Manual)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.2.11
+ text: "Minimize the admission of Windows HostProcess containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+ scored: false
+
+ - id: 5.2.12
+ text: "Minimize the admission of HostPath volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `hostPath` volumes.
+ scored: false
+
+ - id: 5.2.13
+ text: "Minimize the admission of containers which use HostPorts (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers which use `hostPort` sections.
+ scored: false
+
+ - id: 5.3
+ text: "Network Policies and CNI"
+ checks:
+ - id: 5.3.1
+ text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+ type: "manual"
+ remediation: |
+ If the CNI plugin in use does not support network policies, consideration should be given to
+ making use of a different plugin, or finding an alternate mechanism for restricting traffic
+ in the Kubernetes cluster.
+ scored: false
+
+ - id: 5.3.2
+ text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 5.4
+ text: "Secrets Management"
+ checks:
+ - id: 5.4.1
+ text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read Secrets from mounted secret files, rather than
+ from environment variables.
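+ For illustration only (all names below are placeholders), a Pod can consume the Secret as a
+ mounted file instead of an environment variable:
+ volumes:
+ - name: app-secrets
+   secret:
+     secretName: app-secrets
+ containers:
+ - name: app
+   volumeMounts:
+   - name: app-secrets
+     mountPath: /etc/app/secrets
+     readOnly: true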
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rke2-cis-1.7/config.yaml b/cfg/rke2-cis-1.7/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke2-cis-1.7/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke2-cis-1.7/controlplane.yaml b/cfg/rke2-cis-1.7/controlplane.yaml new file mode 100644 index 000000000..03d38658c --- /dev/null +++ b/cfg/rke2-cis-1.7/controlplane.yaml @@ -0,0 +1,62 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. 
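+ As a sketch only (the issuer URL, client ID and claim below are placeholders), OIDC
+ authentication is typically enabled with API server flags such as:
+ --oidc-issuer-url=https://idp.example.com
+ --oidc-client-id=kubernetes
+ --oidc-username-claim=email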
+ scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + Permissive. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke2-cis-1.7/etcd.yaml b/cfg/rke2-cis-1.7/etcd.yaml new file mode 100644 index 000000000..885e5d442 --- /dev/null +++ b/cfg/rke2-cis-1.7/etcd.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + Not Applicable. + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + Not Applicable. + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. 
+ Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + Not Applicable. + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + Not Applicable. + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config" + tests: + bin_op: or + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: true diff --git a/cfg/rke2-cis-1.7/master.yaml b/cfg/rke2-cis-1.7/master.yaml new file mode 100644 index 000000000..f7734b8cf --- /dev/null +++ b/cfg/rke2-cis-1.7/master.yaml @@ -0,0 +1,1027 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + Permissive. 
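+ Note that audit logs are only produced when an audit policy file is also configured
+ (see check 3.2.1). A minimal illustrative policy, to be tailored to your needs, is:
+ apiVersion: audit.k8s.io/v1
+ kind: Policy
+ rules:
+ - level: Metadata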
+ scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + Permissive. + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + Permissive. + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.29 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.30 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. 
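+ A minimal illustrative shape for such a file (the provider choice, key name and key material
+ are placeholders) is:
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: EncryptionConfiguration
+ resources:
+   - resources:
+       - secrets
+     providers:
+       - aescbc:
+           keys:
+             - name: key1
+               secret: <base64-encoded 32-byte key>
+       - identity: {}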
+ In this file, choose aescbc, kms or secretbox as the encryption provider. + Permissive. + scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + type: skip + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + Not Applicable. + scored: false + + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + type: skip + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke2-cis-1.7/node.yaml b/cfg/rke2-cis-1.7/node.yaml new file mode 100644 index 000000000..765a3dd32 --- /dev/null +++ b/cfg/rke2-cis-1.7/node.yaml @@ -0,0 +1,461 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + Not Applicable. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc + Not applicable. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + set: true + compare: + op: eq + value: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: "check_cafile_permissions.sh" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "check_cafile_ownership.sh" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. 
If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. 
+ Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + type: skip + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable. + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. 
+ scored: false diff --git a/cfg/rke2-cis-1.7/policies.yaml b/cfg/rke2-cis-1.7/policies.yaml new file mode 100644 index 000000000..75a3efea7 --- /dev/null +++ b/cfg/rke2-cis-1.7/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. 
+ scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilites in applications running on your cluster. 
Where a namespace + contains applicaions which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. 
+ scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/util.go b/cmd/util.go index 5902159e2..95c0b2639 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "encoding/json" "fmt" "os" @@ -14,6 +15,10 @@ import ( "github.com/fatih/color" "github.com/golang/glog" "github.com/spf13/viper" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) // Print colors @@ -290,13 +295,35 @@ Alternatively, you can specify the version with --version ` func getKubeVersion() (*KubeVersion, error) { + kubeConfig, err := rest.InClusterConfig() + if err != nil { + glog.V(3).Infof("Error fetching cluster config: %s", err) + } + isRKE := false + if err == nil && kubeConfig != nil { + k8sClient, err := kubernetes.NewForConfig(kubeConfig) + if err != nil { + glog.V(3).Infof("Failed to fetch k8sClient object from kube config : %s", err) + } + + if err == nil { + isRKE, err = IsRKE(context.Background(), k8sClient) + if err != nil { + glog.V(3).Infof("Error detecting RKE cluster: %s", err) + } + } + } + if k8sVer, err := getKubeVersionFromRESTAPI(); err == nil { glog.V(2).Info(fmt.Sprintf("Kubernetes REST API Reported version: %s", k8sVer)) + if isRKE { + k8sVer.GitVersion = k8sVer.GitVersion + "-rancher1" + } return k8sVer, nil } // These executables might not be on the user's path. - _, err := exec.LookPath("kubectl") + _, err = exec.LookPath("kubectl") if err != nil { glog.V(3).Infof("Error locating kubectl: %s", err) _, err = exec.LookPath("kubelet") @@ -447,7 +474,7 @@ func getPlatformInfo() Platform { } func getPlatformInfoFromVersion(s string) Platform { - versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-])\w+`) + versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-+]*)\w+`) subs := versionRe.FindStringSubmatch(s) if len(subs) < 3 { return Platform{} @@ -481,6 +508,33 @@ func getPlatformBenchmarkVersion(platform Platform) string { } case "vmware": return "tkgi-1.2.53" + case "k3s": + switch platform.Version { + case "1.23": + return "k3s-cis-1.23" + case "1.24": + return "k3s-cis-1.24" + case "1.25", "1.26", "1.27": + return "k3s-cis-1.7" + } + case "rancher": + switch platform.Version { + case "1.23": + return "rke-cis-1.23" + case "1.24": + return "rke-cis-1.24" + case "1.25", "1.26", "1.27": + return "rke-cis-1.7" + } + case "rke2r": + switch platform.Version { + case "1.23": + return "rke2-cis-1.23" + case "1.24": + return "rke2-cis-1.24" + case "1.25", "1.26", "1.27": + return "rke2-cis-1.7" + } } return "" } @@ -535,3 +589,37 @@ func getOcpValidVersion(ocpVer string) (string, error) { glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion unable to find a match for: %q", ocpOriginal)) return "", fmt.Errorf("unable to find a matching Benchmark Version match for ocp version: %s", ocpOriginal) } + +// IsRKE Identifies if the cluster belongs to Rancher Distribution RKE +func IsRKE(ctx context.Context, k8sClient kubernetes.Interface) (bool, error) { + // if there are windows nodes then this should not be counted as rke.linux + windowsNodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + Limit: 1, + LabelSelector: "kubernetes.io/os=windows", + }) + if err != nil { + return false, err + } + if 
len(windowsNodes.Items) != 0 { + return false, nil + } + + // Any node created by RKE should have the annotation, so just grab 1 + nodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{Limit: 1}) + if err != nil { + return false, err + } + + if len(nodes.Items) == 0 { + return false, nil + } + + annos := nodes.Items[0].Annotations + if _, ok := annos["rke.cattle.io/external-ip"]; ok { + return true, nil + } + if _, ok := annos["rke.cattle.io/internal-ip"]; ok { + return true, nil + } + return false, nil +} diff --git a/cmd/util_test.go b/cmd/util_test.go index ad8d2a02d..b5d83fd8f 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -613,6 +613,21 @@ func Test_getPlatformNameFromKubectlOutput(t *testing.T) { args: args{s: ""}, want: Platform{}, }, + { + name: "k3s", + args: args{s: "v1.27.6+k3s1"}, + want: Platform{Name: "k3s", Version: "1.27"}, + }, + { + name: "rancher1", + args: args{s: "v1.25.13-rancher1-1"}, + want: Platform{Name: "rancher1", Version: "1.25"}, + }, + { + name: "rke2", + args: args{s: "v1.27.6+rke2r1"}, + want: Platform{Name: "rke2r", Version: "1.27"}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -694,6 +709,27 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { }, want: "rh-1.0", }, + { + name: "k3s", + args: args{ + platform: Platform{Name: "k3s", Version: "1.27"}, + }, + want: "k3s-cis-1.7", + }, + { + name: "rancher1", + args: args{ + platform: Platform{Name: "rancher", Version: "1.27"}, + }, + want: "rke-cis-1.7", + }, + { + name: "rke2", + args: args{ + platform: Platform{Name: "rke2r", Version: "1.27"}, + }, + want: "rke2-cis-1.7", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/docs/platforms.md b/docs/platforms.md index e0429210b..bbfd11a64 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -8,24 +8,26 @@ Most of our supported benchmarks are defined in one of the following: Some defined by other hardenening guides. 
-| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | -|------|-------------------------------------------------------------------------------------------------------------|--------------------------|---------------------| -| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | -| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | -| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | -| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | -| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | -| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | -| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | -| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | -| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | -| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | -| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS | -| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | -| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | -| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | -| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | -| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | -| CIS | [1.24-microk8s](https://microk8s.io/docs/how-to-cis-harden) | cis-1.24-microk8s | microk8s v1.28 | -| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | -| CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware | +| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | +|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|---------------------| +| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | +| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | +| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | +| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | +| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | +| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | +| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | +| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | +| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | +| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS | +| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | +| CIS | [AKS 
1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0                  | AKS                 |
+| RHEL   | RedHat OpenShift hardening guide | rh-0.7                   | OCP 3.10-3.11       |
+| CIS    | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0                   | OCP 4.1-            |
+| CIS    | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s              | k3s v1.16-v1.24     |
+| DISA   | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS                 |
+| CIS    | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53              | vmware              |
+| CIS    | [1.7.0-rke](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | rke-cis-1.7              | rke v1.25-v1.27     |
+| CIS    | [1.7.0-rke2](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | rke2-cis-1.7             | rke2 v1.25-v1.27    |
+| CIS    | [1.7.0-k3s](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | k3s-cis-1.7              | k3s v1.25-v1.27     |
diff --git a/docs/running.md b/docs/running.md
index fb7fc68b5..0ee670ff8 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -191,4 +191,31 @@ To run the benchmark as a job in your VMware tkgi cluster apply the included `jo
 ```
 kubectl apply -f job-tkgi.yaml
-```
\ No newline at end of file
+```
+
+### Running in a Rancher RKE cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| rke-cis-1.7   | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for Rancher RKE platform.
+To run this you will need to specify `--benchmark rke-cis-1.7` when you run the `kube-bench` command.
+
+### Running in a Rancher RKE2 cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| rke2-cis-1.7  | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for Rancher RKE2 platform.
+To run this you will need to specify `--benchmark rke2-cis-1.7` when you run the `kube-bench` command.
+
+### Running in a Rancher K3s cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| k3s-cis-1.7   | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for Rancher K3S platform.
+To run this you will need to specify `--benchmark k3s-cis-1.7` when you run the `kube-bench` command.
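The three Rancher sections added above share a single invocation pattern; only the benchmark ID changes. A minimal sketch, assuming kube-bench is already installed on the node being assessed and using only the `run`, `--targets` and `--benchmark` flags documented above (the target list shown is simply the full set from the tables):

```
# Run the RKE2 benchmark against all of its target groups
kube-bench run --targets master,etcd,controlplane,node,policies --benchmark rke2-cis-1.7

# The same pattern applies to RKE and K3s clusters
kube-bench run --targets master,etcd,controlplane,node,policies --benchmark rke-cis-1.7
kube-bench run --targets master,etcd,controlplane,node,policies --benchmark k3s-cis-1.7
```

If `--benchmark` is omitted, kube-bench falls back to platform auto-detection, which the `getPlatformBenchmarkVersion` changes to `cmd/util.go` earlier in this patch extend so that k3s, RKE and RKE2 version strings map to these new config names.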
diff --git a/go.mod b/go.mod index ae41c4aea..d679bca6b 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.6 gorm.io/gorm v1.25.1 + k8s.io/apimachinery v0.26.0 k8s.io/client-go v0.26.0 ) @@ -32,7 +33,17 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.19.14 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -40,9 +51,15 @@ require ( github.com/jackc/pgx/v5 v5.2.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.5 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -52,9 +69,23 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - golang.org/x/crypto v0.4.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect golang.org/x/sys v0.14.0 // indirect - golang.org/x/text v0.5.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.26.0 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect + sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 178fbe9af..76b7ef4e3 100644 --- a/go.sum +++ b/go.sum @@ -81,6 +81,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -97,7 +100,20 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= @@ -125,8 +141,13 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic v0.5.7-v3refs 
h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -136,8 +157,13 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -180,11 +206,17 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -194,6 +226,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -201,6 +237,14 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -208,9 +252,10 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= @@ -237,6 +282,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.14.0 
h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= @@ -245,6 +291,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -273,8 +320,9 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -345,7 +393,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -355,6 +404,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -418,6 +469,8 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -427,11 +480,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -469,6 +525,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -478,6 +535,7 @@ golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= @@ -485,7 +543,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -511,6 +568,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -542,6 +600,7 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -574,12 +633,20 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -591,6 +658,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= @@ -605,8 +673,24 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= +k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= +k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= +k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= k8s.io/client-go v0.26.0 
h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= +k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= +sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From ade7cef969d18aa8a2f443df70b33ce4bc754961 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Dec 2023 20:11:43 +0200 Subject: [PATCH 183/262] build(deps): bump gorm.io/gorm from 1.25.1 to 1.25.5 (#1516) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.25.1 to 1.25.5. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.25.1...v1.25.5) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d679bca6b..e807e96c4 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.6 - gorm.io/gorm v1.25.1 + gorm.io/gorm v1.25.5 k8s.io/apimachinery v0.26.0 k8s.io/client-go v0.26.0 ) diff --git a/go.sum b/go.sum index 76b7ef4e3..88e2d804e 100644 --- a/go.sum +++ b/go.sum @@ -664,8 +664,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4= gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= -gorm.io/gorm v1.25.1 h1:nsSALe5Pr+cM3V1qwwQ7rOkw+6UeLrX5O4v3llhHa64= -gorm.io/gorm v1.25.1/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k= +gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= +gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 92a18e7dfd6186375d37ef9892fb87958d08a55c Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sat, 2 Dec 2023 15:59:30 +0800 Subject: [PATCH 184/262] support CIS Kubernetes Benchmark v1.8.0 (#1527) * support CIS Kubernetes Benchmark v1.8.0 * update version info --- cfg/cis-1.8/config.yaml | 2 + cfg/cis-1.8/controlplane.yaml | 60 +++ cfg/cis-1.8/etcd.yaml | 135 +++++ cfg/cis-1.8/master.yaml | 929 ++++++++++++++++++++++++++++++++++ cfg/cis-1.8/node.yaml | 457 +++++++++++++++++ cfg/cis-1.8/policies.yaml | 304 +++++++++++ cfg/config.yaml | 7 + cmd/common_test.go | 1 + docs/architecture.md | 1 + docs/platforms.md | 1 + 10 files changed, 1897 insertions(+) create mode 100644 cfg/cis-1.8/config.yaml create mode 100644 cfg/cis-1.8/controlplane.yaml create mode 100644 cfg/cis-1.8/etcd.yaml create mode 100644 cfg/cis-1.8/master.yaml create mode 100644 cfg/cis-1.8/node.yaml create mode 100644 cfg/cis-1.8/policies.yaml diff --git a/cfg/cis-1.8/config.yaml b/cfg/cis-1.8/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.8/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.8/controlplane.yaml b/cfg/cis-1.8/controlplane.yaml new file mode 100644 index 000000000..378b85d63 --- /dev/null +++ b/cfg/cis-1.8/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "cis-1.8" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. 
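+ As an illustration only (not part of the benchmark text; the flag names below are standard
+ kube-apiserver options and the values depend on your identity provider), an OIDC
+ integration typically passes flags such as --oidc-issuer-url and --oidc-client-id.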
+ scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.8/etcd.yaml b/cfg/cis-1.8/etcd.yaml new file mode 100644 index 000000000..8e47f2987 --- /dev/null +++ b/cfg/cis-1.8/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.8" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. 
+ --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.8/master.yaml b/cfg/cis-1.8/master.yaml new file mode 100644 index 000000000..d0c1332a3 --- /dev/null +++ b/cfg/cis-1.8/master.yaml @@ -0,0 +1,929 @@ +--- +controls: +version: "cis-1.8" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: have + value: "DenyServiceExternalIPs" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and add `DenyServiceExternalIPs` to the + --enable-admission-plugins parameter, so that this admission plugin is enabled. + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
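+ (On kubeadm-provisioned clusters this is commonly /etc/kubernetes/pki/ca.crt; treat that
+ path as an illustrative default and verify the correct CA file for your environment.)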
+ --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... 
+ scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.21 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.22 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.24 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.25 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.28 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.29 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. 
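+ A minimal illustrative EncryptionConfiguration (apiVersion and kind are the standard
+ Kubernetes types; the key name and secret below are placeholders) could look like:
+ apiVersion: apiserver.config.k8s.io/v1
+ kind: EncryptionConfiguration
+ resources:
+   - resources: ["secrets"]
+     providers:
+       - aescbc:
+           keys:
+             - name: key1
+               secret: <base64-encoded 32-byte key>
+       - identity: {}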
+ scored: false + + - id: 1.2.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/cis-1.8/node.yaml b/cfg/cis-1.8/node.yaml new file mode 100644 index 000000000..d51cb6946 --- /dev/null +++ b/cfg/cis-1.8/node.yaml @@ -0,0 +1,457 @@ +--- +controls: +version: "cis-1.8" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "$proxykubeconfig" + set: false + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false diff --git a/cfg/cis-1.8/policies.yaml b/cfg/cis-1.8/policies.yaml new file mode 100644 index 000000000..c9b13c94c --- /dev/null +++ b/cfg/cis-1.8/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "cis-1.8" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. 
+ scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
+ scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index fc95ba56e..05aeeb477 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -277,6 +277,7 @@ version_mapping: "1.23": "cis-1.23" "1.24": "cis-1.24" "1.25": "cis-1.7" + "1.26": "cis-1.8" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "eks-1.2.0": "eks-1.2.0" @@ -349,6 +350,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.8": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index 89cd7f84d..aa006b149 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -241,6 +241,7 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, + {kubeVersion: "1.26", succeed: true, exp: "cis-1.8"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index ba5a51c6c..09d081e30 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -21,6 +21,7 @@ The following table shows the valid targets based on the CIS Benchmark version. | cis-1.23 | master, controlplane, node, etcd, policies | | cis-1.24 | master, controlplane, node, etcd, policies | | cis-1.7 | master, controlplane, node, etcd, policies | +| cis-1.8 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index bbfd11a64..92b6c7d10 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -16,6 +16,7 @@ Some defined by other hardenening guides. 
| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | | CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | | CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | +| CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 | | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | From 0c553cd2f62129aefdca11591d1086275d2e3c49 Mon Sep 17 00:00:00 2001 From: Huang Huang Date: Sun, 3 Dec 2023 15:06:35 +0800 Subject: [PATCH 185/262] fix wrong use of flag in test_items found in 4.13 and 4.14 (#1528) * fix wrong use of flag in test_items found in 4.13 and 4.14 Fixes #1491 * fix for more benchmarks * update integration test * fix test --- cfg/ack-1.0/node.yaml | 4 ---- cfg/cis-1.20/node.yaml | 4 ---- cfg/cis-1.23/node.yaml | 4 ---- cfg/cis-1.24-microk8s/node.yaml | 4 ---- cfg/cis-1.24/node.yaml | 4 ---- cfg/cis-1.5/node.yaml | 4 ---- cfg/cis-1.7/node.yaml | 4 ---- cfg/cis-1.8/node.yaml | 4 ---- cfg/k3s-cis-1.23/node.yaml | 4 ---- cfg/rke-cis-1.23/node.yaml | 4 ---- cfg/rke2-cis-1.23/node.yaml | 4 ---- integration/testdata/Expected_output.data | 19 +++++++++++++------ 12 files changed, 13 insertions(+), 50 deletions(-) diff --git a/cfg/ack-1.0/node.yaml b/cfg/ack-1.0/node.yaml index 19a08177e..961dbcd63 100644 --- a/cfg/ack-1.0/node.yaml +++ b/cfg/ack-1.0/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.20/node.yaml b/cfg/cis-1.20/node.yaml index 2bb949964..72524ae46 100644 --- a/cfg/cis-1.20/node.yaml +++ b/cfg/cis-1.20/node.yaml @@ -46,8 +46,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -61,8 +59,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml index 7f93b334a..3105d37bf 100644 --- a/cfg/cis-1.23/node.yaml +++ b/cfg/cis-1.23/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.24-microk8s/node.yaml b/cfg/cis-1.24-microk8s/node.yaml index 7b9278a54..55dc5c670 100644 --- a/cfg/cis-1.24-microk8s/node.yaml +++ b/cfg/cis-1.24-microk8s/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "600" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml index 6ee5dbce8..f85d7ce24 100644 --- a/cfg/cis-1.24/node.yaml +++ b/cfg/cis-1.24/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "600" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml index 1349054c9..fe47d64cb 100644 --- a/cfg/cis-1.5/node.yaml +++ b/cfg/cis-1.5/node.yaml @@ -48,8 +48,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -64,8 +62,6 @@ groups: test_items: - flag: root:root set: true - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml index d4eabc9f9..e109d41c6 100644 --- a/cfg/cis-1.7/node.yaml +++ b/cfg/cis-1.7/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "600" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/cis-1.8/node.yaml b/cfg/cis-1.8/node.yaml index d51cb6946..04f4270eb 100644 --- a/cfg/cis-1.8/node.yaml +++ b/cfg/cis-1.8/node.yaml @@ -45,8 +45,6 @@ groups: compare: op: bitmask value: "600" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -60,8 +58,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, chown root:root $proxykubeconfig diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml index 211cdbc71..80f0c5a85 100644 --- a/cfg/k3s-cis-1.23/node.yaml +++ b/cfg/k3s-cis-1.23/node.yaml @@ -47,8 +47,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -62,8 +60,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml index 12d3679f1..3fac4fad6 100644 --- a/cfg/rke-cis-1.23/node.yaml +++ b/cfg/rke-cis-1.23/node.yaml @@ -46,8 +46,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -61,8 +59,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml index c05fb391e..596208a73 100644 --- a/cfg/rke2-cis-1.23/node.yaml +++ b/cfg/rke2-cis-1.23/node.yaml @@ -48,8 +48,6 @@ groups: compare: op: bitmask value: "644" - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, @@ -63,8 +61,6 @@ groups: bin_op: or test_items: - flag: root:root - - flag: "$proxykubeconfig" - set: false remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig diff --git a/integration/testdata/Expected_output.data b/integration/testdata/Expected_output.data index 0df5ce0b3..d0cfe93fc 100644 --- a/integration/testdata/Expected_output.data +++ b/integration/testdata/Expected_output.data @@ -221,8 +221,8 @@ minimum. [INFO] 4.1 Worker Node Configuration Files [PASS] 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated) [PASS] 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated) -[PASS] 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) -[PASS] 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual) +[WARN] 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual) +[WARN] 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual) [PASS] 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated) [PASS] 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated) [PASS] 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual) @@ -245,6 +245,13 @@ minimum. [WARN] 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual) == Remediations node == +4.1.3 Run the below command (based on the file location on your system) on the each worker node. 
+For example, +chmod 644 /etc/kubernetes/proxy.conf + +4.1.4 Run the below command (based on the file location on your system) on the each worker node. +For example, chown root:root /etc/kubernetes/proxy.conf + 4.2.6 If using a Kubelet config file, edit the file to set protectKernelDefaults: true. If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and @@ -287,9 +294,9 @@ systemctl restart kubelet.service == Summary node == -19 checks PASS +17 checks PASS 1 checks FAIL -3 checks WARN +5 checks WARN 0 checks INFO [INFO] 5 Kubernetes Policies @@ -419,8 +426,8 @@ resources and that all new resources are created in a specific namespace. 0 checks INFO == Summary total == -69 checks PASS +67 checks PASS 11 checks FAIL -43 checks WARN +45 checks WARN 0 checks INFO From 292678a9073c5fab2937cb995071eec0417faa1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 14:18:41 +0200 Subject: [PATCH 186/262] build(deps): bump actions/checkout from 3 to 4 (#1492) Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 8 ++++---- .github/workflows/mkdocs-deploy.yaml | 2 +- .github/workflows/publish.yml | 2 +- .github/workflows/release.yml | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 65731766f..f0cd14fa8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,7 +28,7 @@ jobs: with: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint @@ -45,7 +45,7 @@ jobs: with: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run unit tests run: make tests - name: Upload code coverage @@ -61,7 +61,7 @@ jobs: with: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Setup Kubernetes cluster (KIND) uses: engineerd/setup-kind@v0.5.0 with: @@ -91,7 +91,7 @@ jobs: with: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Dry-run release snapshot diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index a7bf4e2b5..91c2954bf 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -16,7 +16,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout main - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 persist-credentials: true diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5e943a275..5660e9dd7 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check Out Repo 
- uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a6633d913..df119484f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ jobs: with: go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Run unit tests From c3e3c4c31c26e6dddfe937e46f3f0dc8c98aa774 Mon Sep 17 00:00:00 2001 From: guangwu Date: Tue, 5 Dec 2023 16:52:24 +0800 Subject: [PATCH 187/262] chore: remove refs to deprecated io/ioutil (#1504) Signed-off-by: guoguangwu Co-authored-by: chenk --- check/controls_test.go | 3 +-- check/test_test.go | 3 +-- cmd/common.go | 3 +-- cmd/common_test.go | 22 +++++++++++----------- cmd/kubernetes_version.go | 8 ++++---- cmd/kubernetes_version_test.go | 7 +++---- cmd/run_test.go | 5 ++--- cmd/util_test.go | 13 ++++++------- 8 files changed, 29 insertions(+), 35 deletions(-) diff --git a/check/controls_test.go b/check/controls_test.go index 5393eef21..c6d7f1c59 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -19,7 +19,6 @@ import ( "encoding/json" "encoding/xml" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" @@ -53,7 +52,7 @@ func TestYamlFiles(t *testing.T) { } if !info.IsDir() { t.Logf("reading file: %s", path) - in, err := ioutil.ReadFile(path) + in, err := os.ReadFile(path) if err != nil { t.Fatalf("error opening file %s: %v", path, err) } diff --git a/check/test_test.go b/check/test_test.go index 29eb26922..9da55a491 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -16,7 +16,6 @@ package check import ( "fmt" - "io/ioutil" "os" "strings" "testing" @@ -29,7 +28,7 @@ var ( func init() { var err error - in, err = ioutil.ReadFile("data") + in, err = os.ReadFile("data") if err != nil { panic("Failed reading test data: " + err.Error()) } diff --git a/cmd/common.go b/cmd/common.go index dc447e80f..6f83cee4d 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -18,7 +18,6 @@ import ( "bufio" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -71,7 +70,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { os.Exit(1) } - in, err := ioutil.ReadFile(testYamlFile) + in, err := os.ReadFile(testYamlFile) if err != nil { exitWithError(fmt.Errorf("error opening %s test file: %v", testYamlFile, err)) } diff --git a/cmd/common_test.go b/cmd/common_test.go index aa006b149..3627c9865 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -18,7 +18,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "path/filepath" @@ -681,7 +681,7 @@ func TestPrintSummary(t *testing.T) { os.Stdout = w printSummary(resultTotals, "totals") w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.Contains(t, string(out), "49 checks PASS\n12 checks FAIL\n14 checks WARN\n0 checks INFO\n\n") @@ -700,7 +700,7 @@ func TestPrettyPrintNoSummary(t *testing.T) { noSummary = true prettyPrint(controlsCollection[0], resultTotals) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.NotContains(t, string(out), "49 checks PASS") @@ -719,7 +719,7 @@ func TestPrettyPrintSummary(t *testing.T) { noSummary = false prettyPrint(controlsCollection[0], resultTotals) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout 
assert.Contains(t, string(out), "49 checks PASS") @@ -737,7 +737,7 @@ func TestWriteStdoutOutputNoTotal(t *testing.T) { noTotals = true writeStdoutOutput(controlsCollection) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.NotContains(t, string(out), "49 checks PASS") @@ -757,7 +757,7 @@ func TestWriteStdoutOutputTotal(t *testing.T) { noTotals = false writeStdoutOutput(controlsCollection) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout @@ -767,7 +767,7 @@ func TestWriteStdoutOutputTotal(t *testing.T) { func parseControlsJsonFile(filepath string) ([]*check.Controls, error) { var result []*check.Controls - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return nil, err } @@ -782,7 +782,7 @@ func parseControlsJsonFile(filepath string) ([]*check.Controls, error) { func parseResultJsonFile(filepath string) (JsonOutputFormat, error) { var result JsonOutputFormat - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return result, err } @@ -797,7 +797,7 @@ func parseResultJsonFile(filepath string) (JsonOutputFormat, error) { func parseResultNoTotalsJsonFile(filepath string) ([]*check.Controls, error) { var result []*check.Controls - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return nil, err } @@ -822,7 +822,7 @@ type restoreFn func() func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) { pathenv := os.Getenv("PATH") - tmp, err := ioutil.TempDir("", "TestfakeExecutableInPath") + tmp, err := os.MkdirTemp("", "TestfakeExecutableInPath") if err != nil { return nil, err } @@ -833,7 +833,7 @@ func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) { } if len(execCode) > 0 { - ioutil.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700) + os.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700) } else { f, err := os.OpenFile(execFile, os.O_CREATE|os.O_EXCL, 0700) if err != nil { diff --git a/cmd/kubernetes_version.go b/cmd/kubernetes_version.go index da7769ca6..ae11909af 100644 --- a/cmd/kubernetes_version.go +++ b/cmd/kubernetes_version.go @@ -5,7 +5,7 @@ import ( "encoding/json" "encoding/pem" "fmt" - "io/ioutil" + "io" "net/http" "os" "strings" @@ -45,7 +45,7 @@ func getKubeVersionFromRESTAPI() (*KubeVersion, error) { return nil, err } - tb, err := ioutil.ReadFile(tokenfile) + tb, err := os.ReadFile(tokenfile) if err != nil { glog.V(2).Infof("Failed reading token file Error: %s", err) return nil, err @@ -143,11 +143,11 @@ func getWebData(srvURL, token string, cacert *tls.Certificate) ([]byte, error) { return nil, err } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func loadCertificate(certFile string) (*tls.Certificate, error) { - cacert, err := ioutil.ReadFile(certFile) + cacert, err := os.ReadFile(certFile) if err != nil { return nil, err } diff --git a/cmd/kubernetes_version_test.go b/cmd/kubernetes_version_test.go index 9b8bc8d0c..d76fbe2b7 100644 --- a/cmd/kubernetes_version_test.go +++ b/cmd/kubernetes_version_test.go @@ -3,7 +3,6 @@ package cmd import ( "crypto/tls" "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -12,13 +11,13 @@ import ( ) func TestLoadCertificate(t *testing.T) { - tmp, err := ioutil.TempDir("", "TestFakeLoadCertificate") + tmp, err := os.MkdirTemp("", "TestFakeLoadCertificate") if err != nil { t.Fatalf("unable to create temp directory: %v", err) } defer os.RemoveAll(tmp) - 
goodCertFile, _ := ioutil.TempFile(tmp, "good-cert-*") + goodCertFile, _ := os.CreateTemp(tmp, "good-cert-*") _, _ = goodCertFile.Write([]byte(`-----BEGIN CERTIFICATE----- MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl cm5ldGVzMB4XDTE5MTEwODAxNDAwMFoXDTI5MTEwNTAxNDAwMFowFTETMBEGA1UE @@ -36,7 +35,7 @@ jLv3UYZRHMpuNS8BJU74BuVzVPHd55RAl+bV8yemdZJ7pPzMvGbZ7zRXWODTDlge CQb9lY+jYErisH8Sq7uABFPvi7RaTh8SS7V7OxqHZvmttNTdZs4TIkk45JK7Y+Xq FAjB57z2NcIgJuVpQnGRYtr/JcH2Qdsq8bLtXaojUIWOOqoTDRLYozdMOOQ= -----END CERTIFICATE-----`)) - badCertFile, _ := ioutil.TempFile(tmp, "bad-cert-*") + badCertFile, _ := os.CreateTemp(tmp, "bad-cert-*") cases := []struct { file string diff --git a/cmd/run_test.go b/cmd/run_test.go index 2be443f28..7495e5cfb 100644 --- a/cmd/run_test.go +++ b/cmd/run_test.go @@ -1,7 +1,6 @@ package cmd import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -45,7 +44,7 @@ func TestGetTestYamlFiles(t *testing.T) { // Set up temp config directory var err error - cfgDir, err = ioutil.TempDir("", "kube-bench-test") + cfgDir, err = os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -59,7 +58,7 @@ func TestGetTestYamlFiles(t *testing.T) { // We never expect config.yaml to be returned for _, filename := range []string{"one.yaml", "two.yaml", "three.yaml", "config.yaml"} { - err = ioutil.WriteFile(filepath.Join(d, filename), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, filename), []byte("hello world"), 0666) if err != nil { t.Fatalf("error writing temp file %s: %v", filename, err) } diff --git a/cmd/util_test.go b/cmd/util_test.go index b5d83fd8f..2c24a7a95 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -16,7 +16,6 @@ package cmd import ( "errors" - "io/ioutil" "os" "path/filepath" "reflect" @@ -399,7 +398,7 @@ func TestGetServiceFiles(t *testing.T) { func TestGetDatadirFiles(t *testing.T) { var err error - datadir, err := ioutil.TempDir("", "kube-bench-test-etcd-data-dir") + datadir, err := os.MkdirTemp("", "kube-bench-test-etcd-data-dir") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -474,7 +473,7 @@ func TestMakeSubsitutions(t *testing.T) { func TestGetConfigFilePath(t *testing.T) { var err error - cfgDir, err = ioutil.TempDir("", "kube-bench-test") + cfgDir, err = os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -484,7 +483,7 @@ func TestGetConfigFilePath(t *testing.T) { if err != nil { t.Fatalf("Failed to create temp dir") } - err = ioutil.WriteFile(filepath.Join(d, "master.yaml"), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, "master.yaml"), []byte("hello world"), 0666) if err != nil { t.Logf("Failed to create temp file") } @@ -545,7 +544,7 @@ func TestDecrementVersion(t *testing.T) { } func TestGetYamlFilesFromDir(t *testing.T) { - cfgDir, err := ioutil.TempDir("", "kube-bench-test") + cfgDir, err := os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -557,11 +556,11 @@ func TestGetYamlFilesFromDir(t *testing.T) { t.Fatalf("Failed to create temp dir") } - err = ioutil.WriteFile(filepath.Join(d, "something.yaml"), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, "something.yaml"), []byte("hello world"), 0666) if err != nil { t.Fatalf("error writing file %v", err) } - err = ioutil.WriteFile(filepath.Join(d, "config.yaml"), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, "config.yaml"), []byte("hello world"), 0666) if 
err != nil { t.Fatalf("error writing file %v", err) } From 875fbc7f20bec892d6abdbae66bd62762bb3ee0a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Dec 2023 08:07:14 +0200 Subject: [PATCH 188/262] build(deps): bump github.com/spf13/cobra from 1.6.1 to 1.8.0 (#1530) Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.6.1 to 1.8.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.6.1...v1.8.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 4 ++-- go.sum | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index e807e96c4..be2e458cc 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.14.0 github.com/stretchr/testify v1.8.1 gopkg.in/yaml.v2 v2.4.0 @@ -45,7 +45,7 @@ require ( github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgx/v5 v5.2.0 // indirect diff --git a/go.sum b/go.sum index 88e2d804e..171f1ab6d 100644 --- a/go.sum +++ b/go.sum @@ -76,7 +76,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -189,8 +189,8 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= @@ -274,8 +274,8 @@ github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= From 1393449298c87a33102d9a7842a56b1d32ae445f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 10 Dec 2023 14:07:09 +0200 Subject: [PATCH 189/262] build(deps): bump docker/setup-buildx-action from 2 to 3 (#1497) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 2 to 3. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 5660e9dd7..111315a60 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -20,7 +20,7 @@ jobs: uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Cache Docker layers uses: actions/cache@v3 with: From f353bc4cba96610f4d2aed594468c6999969eb73 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 09:28:12 +0200 Subject: [PATCH 190/262] build(deps): bump golang from 1.21.3 to 1.21.5 (#1534) Bumps golang from 1.21.3 to 1.21.5. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5c00713c0..cc2db99c0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.3 AS build +FROM golang:1.21.5 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 2bb64afae..e56c90286 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.3 AS build +FROM golang:1.21.5 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 940c83ec9..b7a213589 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.3 AS build +FROM golang:1.21.5 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 7a55d5d57c10a4b1fbec140bbbe9bf469130090e Mon Sep 17 00:00:00 2001 From: mjshastha <61929310+mjshastha@users.noreply.github.com> Date: Mon, 18 Dec 2023 12:40:07 +0530 Subject: [PATCH 191/262] Issue: The initial command produces "root:root" as its output only when the file is present. However, if the file is missing, the command will still run successfully, though the desired output of "root:root" won't be obtained. (#1538) Fix: To address this, we've modified the command to achieve the following: Verify the existence of the file. If the file is found, show the user and group ownership in the "username:groupname" format. If the file is not found, display the message "File not found." To accommodate this change, we've integrated the expected output "File not found" for instances where the file is absent. This adjustment ensures the successful execution of the test. Co-authored-by: mjshastha --- cfg/ack-1.0/node.yaml | 10 ++++++---- cfg/cis-1.20/node.yaml | 10 ++++++---- cfg/cis-1.23/node.yaml | 10 ++++++---- cfg/cis-1.24/node.yaml | 10 ++++++---- cfg/cis-1.5/node.yaml | 13 +++++++------ cfg/cis-1.6/node.yaml | 4 +++- cfg/cis-1.7/node.yaml | 10 ++++++---- cfg/cis-1.8/node.yaml | 10 ++++++---- 8 files changed, 46 insertions(+), 31 deletions(-) diff --git a/cfg/ack-1.0/node.yaml b/cfg/ack-1.0/node.yaml index 961dbcd63..3872880c7 100644 --- a/cfg/ack-1.0/node.yaml +++ b/cfg/ack-1.0/node.yaml @@ -25,14 +25,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.20/node.yaml b/cfg/cis-1.20/node.yaml index 72524ae46..f2afb76fc 100644 --- a/cfg/cis-1.20/node.yaml +++ b/cfg/cis-1.20/node.yaml @@ -25,14 +25,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml index 3105d37bf..affdbf99f 100644 --- a/cfg/cis-1.23/node.yaml +++ b/cfg/cis-1.23/node.yaml @@ -24,14 +24,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml index f85d7ce24..3649428af 100644 --- a/cfg/cis-1.24/node.yaml +++ b/cfg/cis-1.24/node.yaml @@ -24,14 +24,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml index fe47d64cb..8823598f2 100644 --- a/cfg/cis-1.5/node.yaml +++ b/cfg/cis-1.5/node.yaml @@ -25,16 +25,17 @@ groups: scored: true - id: 4.1.2 - text: "Ensure that the kubelet service file ownership is set to root:root (Scored)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root - set: true + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. 
- For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.6/node.yaml b/cfg/cis-1.6/node.yaml index e3964158d..157eecce5 100644 --- a/cfg/cis-1.6/node.yaml +++ b/cfg/cis-1.6/node.yaml @@ -25,10 +25,12 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml index e109d41c6..de1c29c2d 100644 --- a/cfg/cis-1.7/node.yaml +++ b/cfg/cis-1.7/node.yaml @@ -24,14 +24,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 diff --git a/cfg/cis-1.8/node.yaml b/cfg/cis-1.8/node.yaml index 04f4270eb..66e7697ec 100644 --- a/cfg/cis-1.8/node.yaml +++ b/cfg/cis-1.8/node.yaml @@ -24,14 +24,16 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletsvc scored: true - id: 4.1.3 From 64c049240197f4b9ee2374c16c00dc354e5c1073 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 08:42:25 +0200 Subject: [PATCH 192/262] build(deps): bump docker/login-action from 2 to 3 (#1500) Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v2...v3) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 111315a60..602831e67 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -29,12 +29,12 @@ jobs: restore-keys: | ${{ runner.os }}-buildxarch- - name: Login to Docker Hub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to ECR - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: public.ecr.aws username: ${{ secrets.ECR_ACCESS_KEY_ID }} From 58a49da7137dacb93232d2c8cd6f47ceddf8d6da Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 19 Dec 2023 09:08:02 +0200 Subject: [PATCH 193/262] release: prepare v0.7.0 (#1543) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index adf81cefc..9b8ff5ef7 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.6.19 + image: docker.io/aquasec/kube-bench:v0.7.0 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 151efc34940942d021783f4bd350f9c3caf9f58d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Dec 2023 20:57:02 +0200 Subject: [PATCH 194/262] build(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 (#1542) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0. - [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index be2e458cc..791d02fb9 100644 --- a/go.mod +++ b/go.mod @@ -69,12 +69,12 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/subosito/gotenv v1.4.1 // indirect - golang.org/x/crypto v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect - golang.org/x/sys v0.14.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 171f1ab6d..0b916b43c 100644 --- a/go.sum +++ b/go.sum @@ -321,8 +321,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -464,13 +464,13 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -481,8 +481,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From 8c47d59e99c4d9c6a738b97ab05f289a8be83ab5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Jan 2024 07:59:17 +0200 Subject: [PATCH 195/262] build(deps): bump github.com/spf13/viper from 1.14.0 to 1.18.2 (#1541) Bumps [github.com/spf13/viper](https://github.com/spf13/viper) from 1.14.0 to 1.18.2. - [Release notes](https://github.com/spf13/viper/releases) - [Commits](https://github.com/spf13/viper/compare/v1.14.0...v1.18.2) --- updated-dependencies: - dependency-name: github.com/spf13/viper dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 36 +++--- go.sum | 400 ++++++--------------------------------------------------- 2 files changed, 62 insertions(+), 374 deletions(-) diff --git a/go.mod b/go.mod index 791d02fb9..ff19917c8 100644 --- a/go.mod +++ b/go.mod @@ -12,8 +12,8 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 - github.com/spf13/viper v1.14.0 - github.com/stretchr/testify v1.8.1 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.6 gorm.io/gorm v1.25.5 @@ -32,15 +32,15 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect github.com/aws/smithy-go v1.13.5 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect github.com/go-openapi/swag v0.19.14 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect @@ -60,24 +60,28 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/afero v1.9.2 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.9.0 // indirect golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect + golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect 
gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 0b916b43c..5e41b194f 100644 --- a/go.sum +++ b/go.sum @@ -1,43 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= @@ -69,37 +31,25 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4 github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -117,22 +67,10 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -140,23 +78,16 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -164,31 +95,9 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= @@ -210,16 +119,13 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -256,212 +162,115 @@ github.com/onsi/ginkgo/v2 v2.4.0 
h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/afero v1.9.2 h1:j49Hj62F0n+DaZ1dDCvhABaPNSGNkt32oRFxI33IEMw= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= 
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.14.0 h1:Rg7d3Lo706X9tHsJMUjdiwMpHB7W8WnSVOssIY+JElU= -github.com/spf13/viper v1.14.0/go.mod h1:WT//axPky3FdvXHzGw33dNdXXXfFQqmEalje+egj8As= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= -github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr 
v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod 
h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= @@ -471,158 +280,42 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= -golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -632,11 +325,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -667,12 +359,7 @@ gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= @@ -685,9 +372,6 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= From 221ff4fd428163fc3d8bfc44b14ab97ad1f44b64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Jan 2024 11:12:57 +0200 Subject: [PATCH 196/262] build(deps): bump actions/setup-go from 4 to 5 (#1537) Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 8 ++++---- .github/workflows/release.yml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f0cd14fa8..41c6f0bc8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -57,7 +57,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code @@ -87,7 +87,7 @@ jobs: needs: [e2e, unit] steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index df119484f..818123f38 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v4 + uses: actions/setup-go@v5 with: go-version: ${{ env.GO_VERSION }} - name: Checkout code From a4b46f50de67d7bdd74f1b6f6c573af204c9389c Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Wed, 10 Jan 2024 17:56:50 +0530 Subject: [PATCH 197/262] chore: update go version to 1.21 (#1546) Updating go version to 1.21 --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- go.mod | 6 +++--- go.sum | 13 +++++++++---- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 41c6f0bc8..070d24b33 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ on: - "LICENSE" - "NOTICE" env: - GO_VERSION: "1.19" + GO_VERSION: "1.21" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 818123f38..4fc523f6f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,7 +5,7 @@ on: tags: - "v*" env: - GO_VERSION: "1.19" + GO_VERSION: "1.21" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" diff --git a/go.mod b/go.mod index ff19917c8..16a5cf17e 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,13 @@ module github.com/aquasecurity/kube-bench -go 1.19 +go 1.21 require ( github.com/aws/aws-sdk-go-v2 v1.18.0 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.16.0 - github.com/golang/glog v1.1.2 + github.com/golang/glog v1.2.0 github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 @@ -42,7 +42,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.1.0 // 
indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect diff --git a/go.sum b/go.sum index 5e41b194f..fe4c32a3f 100644 --- a/go.sum +++ b/go.sum @@ -46,6 +46,7 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= @@ -65,8 +66,8 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg78 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -90,8 +91,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -126,6 +127,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -159,9 +161,11 @@ github.com/onsi/ginkgo v1.12.1/go.mod 
h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -172,6 +176,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= From 7efba2b94d15d145ccd1bb25262d87ea97900fc1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 10:34:21 +0200 Subject: [PATCH 198/262] build(deps): bump k8s.io/client-go from 0.26.0 to 0.29.0 (#1540) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.26.0 to 0.29.0. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.26.0...v0.29.0) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 34 +++++++------- go.sum | 141 ++++++++++++++++++++------------------------------------- 2 files changed, 65 insertions(+), 110 deletions(-) diff --git a/go.mod b/go.mod index 16a5cf17e..8fe548f73 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.4.6 gorm.io/gorm v1.25.5 - k8s.io/apimachinery v0.26.0 - k8s.io/client-go v0.26.0 + k8s.io/apimachinery v0.29.0 + k8s.io/client-go v0.29.0 ) require ( @@ -33,17 +33,17 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect github.com/aws/smithy-go v1.13.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.4.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -53,7 +53,7 @@ require ( github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -85,11 +85,11 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.26.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + k8s.io/api v0.29.0 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index fe4c32a3f..33fd24822 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod 
h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= @@ -30,19 +28,14 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5b github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -51,51 +44,49 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 
h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp 
v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -123,7 +114,6 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -134,10 +124,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -152,7 +140,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -160,12 +147,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -173,10 +160,9 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= 
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= @@ -194,17 +180,16 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -224,20 +209,12 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -250,17 +227,14 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -297,47 +271,31 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.13.0 
h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -355,7 +313,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= @@ -363,23 +320,21 @@ gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wo gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= +k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= 
+k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From cc6c091b41804f6b8d525c23acc471a952799d5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 11:02:00 +0200 Subject: [PATCH 199/262] build(deps): bump gorm.io/driver/postgres from 1.4.6 to 1.5.4 (#1514) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.4.6 to 1.5.4. - [Commits](https://github.com/go-gorm/postgres/compare/v1.4.6...v1.5.4) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 42 ++++-------------------------------------- 2 files changed, 6 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index 8fe548f73..c4c3ff970 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.4.6 + gorm.io/driver/postgres v1.5.4 gorm.io/gorm v1.25.5 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 @@ -48,7 +48,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgx/v5 v5.2.0 // indirect + github.com/jackc/pgx/v5 v5.4.3 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/go.sum b/go.sum index 33fd24822..dbac14a00 100644 --- a/go.sum +++ b/go.sum @@ -94,15 +94,12 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.2.0 h1:NdPpngX0Y6z6XDFKqmFQaE+bCtkqzvQIOt1wvBlAqs8= -github.com/jackc/pgx/v5 v5.2.0/go.mod h1:Ptn7zmohNsWEsdxRawMzk3gaKma2obW+NWTnKa0S4nk= -github.com/jackc/puddle/v2 v2.1.2/go.mod h1:2lpufsF5mRHO6SuZkm0fNYxM6SWHfvyFj62KwNzgels= 
+github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= +github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/jinzhu/now v1.1.4/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -113,9 +110,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -160,7 +155,6 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -196,7 +190,6 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ -204,16 +197,12 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= @@ -221,10 +210,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= @@ -233,8 +218,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -243,29 +226,16 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -275,7 +245,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -295,10 +264,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -315,9 +282,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.4.6 h1:1FPESNXqIKG5JmraaH2bfCVlMQ7paLoCreFxDtqzwdc= -gorm.io/driver/postgres v1.4.6/go.mod h1:UJChCNLFKeBqQRE+HrkFUbKbq9idPXmTOk2u4Wok8S4= -gorm.io/gorm v1.24.2/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA= +gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= +gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= From 39c29fb07a5bc14bff8855fc62c81a9858acfb66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 12 Jan 2024 08:01:55 +0200 Subject: [PATCH 200/262] build(deps): bump alpine from 3.18.3 to 3.19.0 (#1535) Bumps alpine from 3.18.3 to 3.19.0. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index cc2db99c0..b289d618f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.18.3 AS run +FROM alpine:3.19.0 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 38949874d187585d00e44956f62801ae3e170637 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 20 Jan 2024 12:22:18 +0200 Subject: [PATCH 201/262] build(deps): bump github.com/aws/aws-sdk-go-v2 from 1.18.0 to 1.24.1 (#1550) Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.18.0 to 1.24.1. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.18.0...v1.24.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index c4c3ff970..be5a316a5 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.21 require ( - github.com/aws/aws-sdk-go-v2 v1.18.0 + github.com/aws/aws-sdk-go-v2 v1.24.1 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.16.0 @@ -31,7 +31,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect - github.com/aws/smithy-go v1.13.5 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect diff --git a/go.sum b/go.sum index dbac14a00..e41340496 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.18.0 h1:882kkTpSFhdgYRKVZ/VCgf7sd0ru57p2JCxz4/oN5RY= -github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= +github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= @@ -26,8 +26,9 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDA github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From 13da372a87f2ffdd0b563749c0635bfc9af0de5e Mon Sep 17 00:00:00 2001 From: Kiran Bodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Tue, 23 Jan 2024 12:26:40 +0530 Subject: [PATCH 202/262] Updating the rh-1.0 OCP checks (#1548) 1. Added audit commands wherever required. 2. Updated the scripts with type to manual to match the title. 3. Updated the scripts with test_items wherever required. 4. Fixed a typo. 
--- cfg/rh-1.0/etcd.yaml | 2 +- cfg/rh-1.0/master.yaml | 7 ++++++- cfg/rh-1.0/node.yaml | 11 ++++++----- cfg/rh-1.0/policies.yaml | 31 +++++++++++++++++++++++++++++++ 4 files changed, 44 insertions(+), 7 deletions(-) diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index d1844a283..2fa789850 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -67,7 +67,7 @@ groups: op: eq value: "1" remediation: | - This setting is managed by the cluster etcd operator. No remediation required.e + This setting is managed by the cluster etcd operator. No remediation required. scored: false - id: 2.4 diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml index 8866a42e2..88589082a 100644 --- a/cfg/rh-1.0/master.yaml +++ b/cfg/rh-1.0/master.yaml @@ -864,7 +864,6 @@ groups: remediation: | Follow the documentation for log forwarding. Forwarding logs to third party systems https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html - scored: false - id: 1.2.24 @@ -1070,6 +1069,12 @@ groups: - id: 1.2.35 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" type: manual + audit: | + # verify cipher suites + oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo + oc get kubeapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo + oc get openshiftapiservers.operator.openshift.io cluster -o json | jq .spec.observedConfig.servingInfo + oc describe --namespace=openshift-ingress-operator ingresscontroller/default remediation: | Verify that the tlsSecurityProfile is set to the value you chose. Note: The HAProxy Ingress controller image does not support TLS 1.3 diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index b22dcea85..0ea5682ff 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -222,15 +222,12 @@ groups: audit: | for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') do - oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf + oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf | awk -F': ' '{ print "clientCAFile=" $2 }' done 2> /dev/null use_multiple_values: true tests: test_items: - - flag: "clientCAFile" - compare: - op: eq - value: "/etc/kubernetes/kubelet-ca.crt" + - flag: clientCAFile="/etc/kubernetes/kubelet-ca.crt" remediation: | None required. Changing the clientCAFile value is unsupported. scored: true @@ -278,6 +275,10 @@ groups: compare: op: noteq value: 0 + - flag: streamingConnectionIdleTimeout + compare: + op: noteq + value: 0s - flag: "exit_code" compare: op: eq diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml index 2a629b422..95de04e4a 100644 --- a/cfg/rh-1.0/policies.yaml +++ b/cfg/rh-1.0/policies.yaml @@ -11,6 +11,12 @@ groups: - id: 5.1.1 text: "Ensure that the cluster-admin role is only used where required (Manual)" type: "manual" + audit: | + #To get a list of users and service accounts with the cluster-admin role + oc get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | + grep cluster-admin + #To verify that kubeadmin is removed, no results should be returned + oc get secrets kubeadmin -n kube-system remediation: | Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges.
@@ -29,6 +35,15 @@ groups: - id: 5.1.3 text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done remediation: | Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. @@ -213,6 +228,9 @@ groups: - id: 5.3.2 text: "Ensure that all Namespaces have Network Policies defined (Manual)" type: "manual" + audit: | + #Run the following command and review the NetworkPolicy objects created in the cluster. + oc -n all get networkpolicy remediation: | Follow the documentation and create NetworkPolicy objects as you need them. scored: false @@ -223,6 +241,10 @@ groups: - id: 5.4.1 text: "Prefer using secrets as files over secrets as environment variables (Manual)" type: "manual" + audit: | + #Run the following command to find references to objects which use environment variables defined from secrets. + oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} + {.metadata.name} {"\n"}{end}' -A remediation: | If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. @@ -252,6 +274,10 @@ groups: - id: 5.7.1 text: "Create administrative boundaries between resources using namespaces (Manual)" type: "manual" + audit: | + #Run the following command and review the namespaces created in the cluster. + oc get namespaces + #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. remediation: | Follow the documentation and create namespaces for objects in your deployment as you need them. @@ -277,6 +303,11 @@ groups: - id: 5.7.4 text: "The default namespace should not be used (Manual)" type: "manual" + audit: | + #Run this command to list objects in default namespace + oc project default + oc get all + #The only entries there should be system managed resources such as the kubernetes and openshift service remediation: | Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. From 628999c9c5ed0d993a098fbfc6aa3211e93115f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 13:12:14 +0200 Subject: [PATCH 203/262] build(deps): bump golang from 1.21.5 to 1.21.6 (#1549) Bumps golang from 1.21.5 to 1.21.6. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index b289d618f..9bbcf8ce5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5 AS build +FROM golang:1.21.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index e56c90286..3743a712c 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.5 AS build +FROM golang:1.21.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index b7a213589..b37d665b8 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.5 AS build +FROM golang:1.21.6 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From faa1b4be3db87b1e4af732141af42ce1a9c70ef3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jan 2024 13:40:45 +0200 Subject: [PATCH 204/262] build(deps): bump actions/cache from 3 to 4 (#1551) Bumps [actions/cache](https://github.com/actions/cache) from 3 to 4. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 602831e67..681ae0a02 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -22,7 +22,7 @@ jobs: id: buildx uses: docker/setup-buildx-action@v3 - name: Cache Docker layers - uses: actions/cache@v3 + uses: actions/cache@v4 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildxarch-${{ github.sha }} From b17aa709b38181e9588977de911f3df84eecd646 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 27 Jan 2024 11:18:39 +0200 Subject: [PATCH 205/262] build(deps): bump k8s.io/apimachinery from 0.29.0 to 0.29.1 (#1553) Bumps [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) from 0.29.0 to 0.29.1. - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.0...v0.29.1) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index be5a316a5..fa597c8bb 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.5.4 gorm.io/gorm v1.25.5 - k8s.io/apimachinery v0.29.0 + k8s.io/apimachinery v0.29.1 k8s.io/client-go v0.29.0 ) diff --git a/go.sum b/go.sum index e41340496..da9137530 100644 --- a/go.sum +++ b/go.sum @@ -246,8 +246,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -289,8 +289,8 @@ gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= -k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= -k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= From a93b19f0c0fa4ace12986208d5b4c6e3d7bdc7cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 28 Jan 2024 12:21:47 +0200 Subject: [PATCH 206/262] build(deps): bump k8s.io/client-go from 0.29.0 to 0.29.1 (#1552) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.29.0 to 0.29.1. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.0...v0.29.1) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index fa597c8bb..3b930b485 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( gorm.io/driver/postgres v1.5.4 gorm.io/gorm v1.25.5 k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.0 + k8s.io/client-go v0.29.1 ) require ( @@ -85,7 +85,7 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.0 // indirect + k8s.io/api v0.29.1 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect diff --git a/go.sum b/go.sum index da9137530..c6e254515 100644 --- a/go.sum +++ b/go.sum @@ -287,12 +287,12 @@ gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -k8s.io/api v0.29.0 h1:NiCdQMY1QOp1H8lfRyeEf8eOwV6+0xA6XEE44ohDX2A= -k8s.io/api v0.29.0/go.mod h1:sdVmXoz2Bo/cb77Pxi71IPTSErEW32xa4aXwKH7gfBA= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= -k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= From 57fba224fafd0cc69e88552f3bcc1fdf522174ac Mon Sep 17 00:00:00 2001 From: Devendra Turkar Date: Wed, 31 Jan 2024 13:21:08 +0530 Subject: [PATCH 207/262] chore: update base image to ubi9 (#1556) --- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 3743a712c..3142d75b9 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -11,7 +11,7 @@ RUN make build-fips && cp kube-bench /go/bin/kube-bench # ubi8-minimal base image for build with ubi standards -FROM registry.access.redhat.com/ubi8/ubi-minimal as run +FROM registry.access.redhat.com/ubi9/ubi-minimal as run RUN microdnf install -y yum findutils openssl \ && yum -y update-minimal --security --sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index b37d665b8..071711004 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -11,7 +11,7 @@ RUN make build && cp kube-bench /go/bin/kube-bench # ubi8-minimal base image for build with ubi standards -FROM registry.access.redhat.com/ubi8/ubi-minimal as run +FROM registry.access.redhat.com/ubi9/ubi-minimal as run RUN microdnf install -y yum findutils openssl \ && yum -y update-minimal --security 
--sec-severity=Moderate --sec-severity=Important --sec-severity=Critical \ From 445c1160cf8e3c54982f4a95d375ac712ed03f4c Mon Sep 17 00:00:00 2001 From: chenk Date: Wed, 31 Jan 2024 11:57:16 +0200 Subject: [PATCH 208/262] release: prepare v0.7.1 (#1559) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 9b8ff5ef7..95654ac79 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.7.0 + image: docker.io/aquasec/kube-bench:v0.7.1 name: kube-bench volumeMounts: - mountPath: /var/lib/etcd From 30217061ac4794e9e08c2aa77f74eed3e57c9a39 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 3 Feb 2024 09:35:10 +0200 Subject: [PATCH 209/262] build(deps): bump github.com/aws/aws-sdk-go-v2/config (#1554) Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.18.4 to 1.26.6. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.18.4...config/v1.26.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 21 +++++++++++---------- go.sum | 43 ++++++++++++++++++++++--------------------- 2 files changed, 33 insertions(+), 31 deletions(-) diff --git a/go.mod b/go.mod index 3b930b485..fd6fd2f49 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.18.4 + github.com/aws/aws-sdk-go-v2/config v1.26.6 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.16.0 github.com/golang/glog v1.2.0 @@ -22,15 +22,16 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect github.com/aws/smithy-go v1.19.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect diff --git 
a/go.sum b/go.sum index c6e254515..1e3a41b04 100644 --- a/go.sum +++ b/go.sum @@ -1,31 +1,32 @@ -github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= -github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= -github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= -github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys= +github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= +github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 
h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= From faeceb5dfa346c788c5daa6b8efe26cb591f233b Mon Sep 17 00:00:00 2001 From: Andrey Polovov Date: Sun, 11 Feb 2024 12:23:17 +0300 Subject: [PATCH 210/262] job.yaml: Adding /var/lib/cni mounts for proper CIS 1.1.9 and 1.1.0 checking (#1547) Signed-off-by: Andrey Polovov Signed-off-by: Andrey Pavlov Co-authored-by: Andrey Pavlov Co-authored-by: chenk --- job-master.yaml | 6 ++++++ job-node.yaml | 6 ++++++ job.yaml | 6 ++++++ 3 files changed, 18 insertions(+) diff --git a/job-master.yaml b/job-master.yaml index 34f24f2f1..4df64d3dc 100644 --- a/job-master.yaml +++ b/job-master.yaml @@ -29,6 +29,9 @@ spec: image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "master"] volumeMounts: + - name: var-lib-cni + mountPath: /var/lib/cni + readOnly: true - name: var-lib-etcd mountPath: /var/lib/etcd readOnly: true @@ -72,6 +75,9 @@ spec: readOnly: true restartPolicy: Never volumes: + - name: var-lib-cni + hostPath: + path: "/var/lib/cni" - name: var-lib-etcd hostPath: path: "/var/lib/etcd" diff --git a/job-node.yaml b/job-node.yaml index 0e6f9a9f9..1aa7023df 100644 --- a/job-node.yaml +++ b/job-node.yaml @@ -12,6 +12,9 @@ spec: image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "node"] volumeMounts: + - name: var-lib-cni + mountPath: /var/lib/cni + readOnly: true - name: var-lib-etcd mountPath: /var/lib/etcd readOnly: true @@ -49,6 +52,9 @@ spec: 
readOnly: true restartPolicy: Never volumes: + - name: var-lib-cni + hostPath: + path: "/var/lib/cni" - name: var-lib-etcd hostPath: path: "/var/lib/etcd" diff --git a/job.yaml b/job.yaml index 95654ac79..049f305c3 100644 --- a/job.yaml +++ b/job.yaml @@ -14,6 +14,9 @@ spec: image: docker.io/aquasec/kube-bench:v0.7.1 name: kube-bench volumeMounts: + - name: var-lib-cni + mountPath: /var/lib/cni + readOnly: true - mountPath: /var/lib/etcd name: var-lib-etcd readOnly: true @@ -50,6 +53,9 @@ spec: hostPID: true restartPolicy: Never volumes: + - name: var-lib-cni + hostPath: + path: /var/lib/cni - hostPath: path: /var/lib/etcd name: var-lib-etcd From 2374e7b07fa85e6e5276f94e02930d9bf4b5e507 Mon Sep 17 00:00:00 2001 From: Kiran Bodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Mon, 12 Feb 2024 18:59:36 +0530 Subject: [PATCH 211/262] Rancher checks correction (#1563) 1. Have modified test criteria such that it produces right output in case of there is no file exists. 2. Have modified the tests wherever root:root is checked multiple times. --- cfg/k3s-cis-1.23/node.yaml | 3 --- cfg/k3s-cis-1.24/node.yaml | 3 --- cfg/k3s-cis-1.7/node.yaml | 3 --- cfg/rke-cis-1.23/node.yaml | 3 --- cfg/rke-cis-1.24/node.yaml | 12 ++++++------ cfg/rke-cis-1.7/node.yaml | 3 --- cfg/rke2-cis-1.23/node.yaml | 3 --- cfg/rke2-cis-1.24/node.yaml | 3 --- cfg/rke2-cis-1.7/node.yaml | 3 --- 9 files changed, 6 insertions(+), 30 deletions(-) diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml index 80f0c5a85..c0b60dfe2 100644 --- a/cfg/k3s-cis-1.23/node.yaml +++ b/cfg/k3s-cis-1.23/node.yaml @@ -149,9 +149,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml index 3b9509103..82ddff0fb 100644 --- a/cfg/k3s-cis-1.24/node.yaml +++ b/cfg/k3s-cis-1.24/node.yaml @@ -118,9 +118,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml index 9941e6cfb..780bb4d8b 100644 --- a/cfg/k3s-cis-1.7/node.yaml +++ b/cfg/k3s-cis-1.7/node.yaml @@ -114,9 +114,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml index 3fac4fad6..ea13f945d 100644 --- a/cfg/rke-cis-1.23/node.yaml +++ b/cfg/rke-cis-1.23/node.yaml @@ -111,9 +111,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. 
chown root:root diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml index ce7762baa..71f1b7d86 100644 --- a/cfg/rke-cis-1.24/node.yaml +++ b/cfg/rke-cis-1.24/node.yaml @@ -94,27 +94,27 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" - audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem" + audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: "permissions" compare: op: bitmask value: "600" + - flag: "File not found" remediation: | Run the following command to modify the file permissions of the --client-ca-file chmod 600 scored: true - - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" - audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem" + audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root - compare: - op: eq - value: root:root + - flag: "File not found" remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/rke-cis-1.7/node.yaml b/cfg/rke-cis-1.7/node.yaml index 8c9ec4c27..ff5731732 100644 --- a/cfg/rke-cis-1.7/node.yaml +++ b/cfg/rke-cis-1.7/node.yaml @@ -116,9 +116,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml index 596208a73..bbb015f32 100644 --- a/cfg/rke2-cis-1.23/node.yaml +++ b/cfg/rke2-cis-1.23/node.yaml @@ -119,9 +119,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml index bfbc24d44..b99703fc4 100644 --- a/cfg/rke2-cis-1.24/node.yaml +++ b/cfg/rke2-cis-1.24/node.yaml @@ -119,9 +119,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. chown root:root diff --git a/cfg/rke2-cis-1.7/node.yaml b/cfg/rke2-cis-1.7/node.yaml index 765a3dd32..155aef7d4 100644 --- a/cfg/rke2-cis-1.7/node.yaml +++ b/cfg/rke2-cis-1.7/node.yaml @@ -120,9 +120,6 @@ groups: tests: test_items: - flag: root:root - compare: - op: eq - value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. 
chown root:root From ee5e4aff515dbf72c9d6e0ed9483d74c216d9617 Mon Sep 17 00:00:00 2001 From: Kiran Bodipi <62982917+KiranBodipi@users.noreply.github.com> Date: Thu, 15 Feb 2024 15:04:31 +0530 Subject: [PATCH 212/262] update rke-cis-1.24 benchmarks: corrected errors and tests (#1570) corrected few benchmarks with title and respective tests Handled type and title mismatch Added missing audit commands --- cfg/rke-cis-1.24/controlplane.yaml | 2 +- cfg/rke-cis-1.24/master.yaml | 74 ++++++++++++------------- cfg/rke-cis-1.24/node.yaml | 21 +++---- cfg/rke-cis-1.24/policies.yaml | 88 +++++++++++++++++++++++++----- 4 files changed, 118 insertions(+), 67 deletions(-) diff --git a/cfg/rke-cis-1.24/controlplane.yaml b/cfg/rke-cis-1.24/controlplane.yaml index 5cd375933..227bcb221 100644 --- a/cfg/rke-cis-1.24/controlplane.yaml +++ b/cfg/rke-cis-1.24/controlplane.yaml @@ -20,7 +20,7 @@ groups: text: "Logging" checks: - id: 3.2.1 - text: "Ensure that a minimal audit policy is created (Manual)" + text: "Ensure that a minimal audit policy is created (Automated)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: test_items: diff --git a/cfg/rke-cis-1.24/master.yaml b/cfg/rke-cis-1.24/master.yaml index 394cff4e5..c08d6e57b 100644 --- a/cfg/rke-cis-1.24/master.yaml +++ b/cfg/rke-cis-1.24/master.yaml @@ -9,15 +9,16 @@ groups: text: "Control Plane Node Configuration Files" checks: - id: 1.1.1 - text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" - type: "skip" - audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf;else echo \"File not found\"; fi'" tests: + bin_op: or test_items: - flag: "permissions" compare: op: bitmask - value: "600" + value: "644" + - flag: "File not found" remediation: | Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. All configuration is passed in as arguments at container run time. @@ -138,7 +139,7 @@ groups: scored: false - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" audit: | ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G @@ -150,7 +151,7 @@ groups: Run the below command (based on the file location on your system) on the control plane node. For example, chown root:root - scored: false + scored: true - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" @@ -286,11 +287,13 @@ groups: scored: true - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" - audit: "find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a" - use_multiple_values: true + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)" + audit: | + if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem' ! 
-name '*key.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a;else echo "File not found"; fi tests: + bin_op: or test_items: + - flag: "File not found" - flag: "permissions" compare: op: bitmask @@ -299,23 +302,25 @@ groups: Run the below command (based on the file location on your system) on the control plane node. For example, find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} + - scored: false + scored: true - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "find /node/etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a" - use_multiple_values: true + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: | + if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' | xargs stat -c permissions=%a;else echo \"File not found\"; fi tests: + bin_op: or test_items: - flag: "permissions" compare: op: bitmask value: "600" + - flag: "File not found" remediation: | Run the below command (based on the file location on your system) on the control plane node. For example, find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} + - scored: false + scored: true - id: 1.2 text: "API Server" @@ -369,20 +374,17 @@ groups: scored: true - id: 1.2.4 - text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + text: "Ensure that the --kubelet-https argument is set to true (Automated)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: - bin_op: and test_items: - - flag: "--kubelet-client-certificate" - - flag: "--kubelet-client-key" + - flag: "--kubelet-https" + compare: + op: eq + value: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and remove the --kubelet-https parameter. scored: true - id: 1.2.5 @@ -406,7 +408,6 @@ groups: - id: 1.2.6 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - type: "skip" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: test_items: @@ -471,7 +472,7 @@ groups: scored: true - id: 1.2.10 - text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: test_items: @@ -486,7 +487,7 @@ groups: and set the below parameters. --enable-admission-plugins=...,EventRateLimit,... --admission-control-config-file= - scored: false + scored: true - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" @@ -521,7 +522,7 @@ groups: on the control plane node and set the --enable-admission-plugins parameter to include AlwaysPullImages. --enable-admission-plugins=...,AlwaysPullImages,... 
- scored: false + scored: true - id: 1.2.13 text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" @@ -542,7 +543,7 @@ groups: on the control plane node and set the --enable-admission-plugins parameter to include SecurityContextDeny, unless PodSecurityPolicy is already in place. --enable-admission-plugins=...,SecurityContextDeny,... - scored: false + scored: true - id: 1.2.14 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" @@ -810,8 +811,7 @@ groups: scored: true - id: 1.2.30 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - type: "skip" + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: test_items: @@ -822,11 +822,10 @@ groups: Then, edit the API server pod specification file $apiserverconf on the control plane node and set the --encryption-provider-config parameter to the path of that file. For example, --encryption-provider-config= - scored: false + scored: true - id: 1.2.31 - text: "Ensure that encryption providers are appropriately configured (Manual)" - type: "skip" + text: "Ensure that encryption providers are appropriately configured (Automated)" audit: | ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi @@ -840,7 +839,7 @@ groups: Follow the Kubernetes documentation and configure a EncryptionConfig file. In this file, choose aescbc, kms or secretbox as the encryption provider. Enabling encryption changes how data can be recovered as data is encrypted. 
- scored: false + scored: true - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" @@ -862,13 +861,13 @@ groups: TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false + scored: true - id: 1.3 text: "Controller Manager" checks: - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" tests: test_items: @@ -941,7 +940,6 @@ groups: - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - type: "skip" audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" tests: bin_op: or diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml index 71f1b7d86..ca5dcc1ad 100644 --- a/cfg/rke-cis-1.24/node.yaml +++ b/cfg/rke-cis-1.24/node.yaml @@ -10,7 +10,6 @@ groups: checks: - id: 4.1.1 text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" - type: "skip" audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' tests: test_items: @@ -37,7 +36,7 @@ groups: scored: true - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)" audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' tests: bin_op: or @@ -54,10 +53,9 @@ groups: scored: true - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)" audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' tests: - bin_op: or test_items: - flag: root:root remediation: | @@ -121,8 +119,7 @@ groups: scored: true - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" - type: "skip" + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' tests: test_items: @@ -136,8 +133,7 @@ groups: scored: true - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" - type: "skip" + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' tests: test_items: @@ -323,7 +319,7 @@ groups: # This is one of those properties that can only be set as a command line argument. # To check if the property is set as expected, we need to parse the kubelet command # instead reading the Kubelet Configuration file. 
- type: "skip" + type: "manual" audit: "/bin/ps -fC $kubeletbin " tests: test_items: @@ -361,8 +357,7 @@ groups: scored: true - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - type: "skip" + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: @@ -384,7 +379,7 @@ groups: systemctl daemon-reload systemctl restart kubelet.service When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. - scored: false + scored: true - id: 4.2.11 text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" @@ -415,7 +410,7 @@ groups: - id: 4.2.12 text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - type: "skip" + type: "manual" audit: "/bin/ps -fC $kubeletbin" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: diff --git a/cfg/rke-cis-1.24/policies.yaml b/cfg/rke-cis-1.24/policies.yaml index 7a8b6f53b..362771a65 100644 --- a/cfg/rke-cis-1.24/policies.yaml +++ b/cfg/rke-cis-1.24/policies.yaml @@ -43,7 +43,7 @@ groups: - id: 5.1.5 text: "Ensure that default service accounts are not actively used. (Manual)" - type: "skip" + type: "manual" audit: check_for_default_sa.sh tests: test_items: @@ -102,38 +102,78 @@ groups: - id: 5.2.3 text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "skip" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. - scored: false + scored: true - id: 5.2.4 text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "skip" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. - scored: false + scored: true - id: 5.2.5 text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "skip" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. 
- scored: false + scored: true - id: 5.2.6 text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" - type: "manual" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. - scored: false + scored: true - id: 5.2.7 - text: "Minimize the admission of root containers (Automated)" + text: "Minimize the admission of root containers (Manual)" type: "manual" remediation: | Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` @@ -141,7 +181,7 @@ groups: scored: false - id: 5.2.8 - text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" type: "manual" remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the @@ -149,7 +189,7 @@ groups: scored: false - id: 5.2.9 - text: "Minimize the admission of containers with added capabilities (Automated)" + text: "Minimize the admission of containers with added capabilities (Manual)" type: "manual" remediation: | Ensure that `allowedCapabilities` is not present in policies for the cluster unless @@ -269,9 +309,27 @@ groups: scored: false - id: 5.7.4 - text: "The default namespace should not be used (Manual)" - type: "skip" + text: "The default namespace should not be used (Automated)" + audit: | + #!/bin/bash + set -eE + handle_error() { + echo "false" + } + trap 'handle_error' ERR + count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) + if [[ ${count} -gt 0 ]]; then + echo "false" + exit + fi + echo "true" + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: "true" remediation: | Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. - scored: false + scored: true From 72eee4b7a4e894213381259d473d2300bf8d53d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 09:15:28 +0200 Subject: [PATCH 213/262] build(deps): bump alpine from 3.19.0 to 3.19.1 (#1557) Bumps alpine from 3.19.0 to 3.19.1. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 9bbcf8ce5..0ec03d178 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.19.0 AS run +FROM alpine:3.19.1 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 66a215189e2704dcb9230bcb611d5398c7cbe5b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 09:31:45 +0200 Subject: [PATCH 214/262] build(deps): bump codecov/codecov-action from 3 to 4 (#1561) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 070d24b33..079c30f25 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -49,7 +49,7 @@ jobs: - name: Run unit tests run: make tests - name: Upload code coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: file: ./coverage.txt e2e: From f297da66037e9145858c6247de67c96a6f1be790 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 09:51:35 +0200 Subject: [PATCH 215/262] build(deps): bump golang from 1.21.6 to 1.22.0 (#1569) Bumps golang from 1.21.6 to 1.22.0. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0ec03d178..10ce9ed84 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.6 AS build +FROM golang:1.22.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 3142d75b9..5a63b2bbc 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.6 AS build +FROM golang:1.22.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 071711004..e3fe7c4fc 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.21.6 AS build +FROM golang:1.22.0 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From 57132a69fdd369892c09890d54933c76a3c37416 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 10:27:44 +0200 Subject: [PATCH 216/262] build(deps): bump gorm.io/driver/postgres from 1.5.4 to 1.5.6 (#1567) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.5.4 to 1.5.6. - [Commits](https://github.com/go-gorm/postgres/compare/v1.5.4...v1.5.6) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index fd6fd2f49..141f5a3ad 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ require ( github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.5.4 - gorm.io/gorm v1.25.5 + gorm.io/driver/postgres v1.5.6 + gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde k8s.io/apimachinery v0.29.1 k8s.io/client-go v0.29.1 ) diff --git a/go.sum b/go.sum index 1e3a41b04..20bb48cea 100644 --- a/go.sum +++ b/go.sum @@ -284,10 +284,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.5.4 h1:Iyrp9Meh3GmbSuyIAGyjkN+n9K+GHX9b9MqsTL4EJCo= -gorm.io/driver/postgres v1.5.4/go.mod h1:Bgo89+h0CRcdA33Y6frlaHHVuTdOf87pmyzwW9C/BH0= -gorm.io/gorm v1.25.5 h1:zR9lOiiYf09VNh5Q1gphfyia1JpiClIWG9hQaxB/mls= -gorm.io/gorm v1.25.5/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/driver/postgres v1.5.6 h1:ydr9xEd5YAM0vxVDY0X139dyzNz10spDiDlC7+ibLeU= +gorm.io/driver/postgres v1.5.6/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg= +gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= k8s.io/api v0.29.1 
h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= From 3db3f736f8b5423882f22747c51978849d56fded Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Feb 2024 13:12:30 +0200 Subject: [PATCH 217/262] build(deps): bump golangci/golangci-lint-action from 3 to 4 (#1568) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 079c30f25..ca69f8022 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,7 @@ jobs: - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: version: latest args: --verbose From abfa7d9613f0f5f9e628a3ec87fea3443fe57805 Mon Sep 17 00:00:00 2001 From: chenk Date: Thu, 29 Feb 2024 13:37:20 +0200 Subject: [PATCH 218/262] release: prepare v0.7.2 (#1578) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 049f305c3..ecb9375fa 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.7.1 + image: docker.io/aquasec/kube-bench:v0.7.2 name: kube-bench volumeMounts: - name: var-lib-cni From 45afbd76c27d558cb859c22e483088ee4b68b622 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Mar 2024 15:36:33 +0200 Subject: [PATCH 219/262] build(deps): bump github.com/aws/aws-sdk-go-v2/config (#1577) Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.26.6 to 1.27.4. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.26.6...config/v1.27.4) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 26 +++++++++++++------------- go.sum | 52 ++++++++++++++++++++++++++-------------------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/go.mod b/go.mod index 141f5a3ad..5c320d6a6 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/aquasecurity/kube-bench go 1.21 require ( - github.com/aws/aws-sdk-go-v2 v1.24.1 - github.com/aws/aws-sdk-go-v2/config v1.26.6 + github.com/aws/aws-sdk-go-v2 v1.25.2 + github.com/aws/aws-sdk-go-v2/config v1.27.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.16.0 github.com/golang/glog v1.2.0 @@ -22,17 +22,17 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect - github.com/aws/smithy-go v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect + github.com/aws/smithy-go v1.20.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect diff --git a/go.sum b/go.sum index 20bb48cea..cb616c22b 100644 --- a/go.sum +++ b/go.sum @@ -1,35 +1,35 @@ github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= -github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= -github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= -github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= -github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= 
+github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= +github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= +github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= +github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= -github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= +github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From dc7441620f70fc96e9ecccaaabce7b7ec1cb4566 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Mar 2024 14:10:34 +0300 Subject: [PATCH 220/262] build(deps): bump golang from 1.22.0 to 1.22.1 (#1583) Bumps golang from 1.22.0 to 1.22.1. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 10ce9ed84..39885adb5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.0 AS build +FROM golang:1.22.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 5a63b2bbc..3fdd4fdcd 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.0 AS build +FROM golang:1.22.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index e3fe7c4fc..610e483bc 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.0 AS build +FROM golang:1.22.1 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From dc8f4d37f0587141c51c5018070bed81aaab2619 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 30 Mar 2024 12:41:07 +0300 Subject: [PATCH 221/262] build(deps): bump github.com/aws/aws-sdk-go-v2 from 1.25.2 to 1.26.0 (#1589) Bumps [github.com/aws/aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2) from 1.25.2 to 1.26.0. - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.25.2...v1.26.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5c320d6a6..8a61a8ab6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/aquasecurity/kube-bench go 1.21 require ( - github.com/aws/aws-sdk-go-v2 v1.25.2 + github.com/aws/aws-sdk-go-v2 v1.26.0 github.com/aws/aws-sdk-go-v2/config v1.27.4 github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 github.com/fatih/color v1.16.0 diff --git a/go.sum b/go.sum index cb616c22b..e36c12031 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= -github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= +github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= +github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= From 73e1377ce0eb812931677d16c48e75b614f61e21 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 6 Apr 2024 08:59:45 +0300 Subject: [PATCH 222/262] build(deps): bump github.com/jackc/pgx/v5 from 5.4.3 to 5.5.4 (#1586) Bumps [github.com/jackc/pgx/v5](https://github.com/jackc/pgx) from 5.4.3 to 5.5.4. - [Changelog](https://github.com/jackc/pgx/blob/master/CHANGELOG.md) - [Commits](https://github.com/jackc/pgx/compare/v5.4.3...v5.5.4) --- updated-dependencies: - dependency-name: github.com/jackc/pgx/v5 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 +++- go.sum | 8 ++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8a61a8ab6..e3ebb00eb 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,8 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgx/v5 v5.4.3 // indirect + github.com/jackc/pgx/v5 v5.5.4 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -77,6 +78,7 @@ require ( golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect diff --git a/go.sum b/go.sum index e36c12031..1deebe8b8 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,10 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.4.3 h1:cxFyXhxlvAifxnkKKdlxv8XqUf59tDlYjnV5YYfsJJY= -github.com/jackc/pgx/v5 v5.4.3/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA= +github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= +github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= @@ -220,6 +222,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= From d2d3e722710555dde14eebe25c098a6281acc701 Mon Sep 17 00:00:00 2001 From: mjshastha <61929310+mjshastha@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:31:17 +0530 Subject: [PATCH 223/262] Currently, certain commands involve retrieving all node names or pods and then 
executing additional commands in a loop, resulting in a time complexity linearly proportional to the number of nodes. (#1597) This approach becomes time-consuming for larger clusters. As kube-bench is executed as a job on every node in the cluster, To enhance performance, Streamlined the commands to execute directly on current node where kube-bench operates. This change ensures that the time complexity remains constant, regardless of the cluster size. By running the necessary commands only once per node, regardless of how many nodes are in the cluster, this approach significantly boosts performance and efficiency. --- cfg/rh-1.0/etcd.yaml | 117 +++++++----- cfg/rh-1.0/master.yaml | 378 ++++++++++++++++++++++++++++----------- cfg/rh-1.0/node.yaml | 165 ++++++++--------- cfg/rh-1.0/policies.yaml | 45 +---- 4 files changed, 430 insertions(+), 275 deletions(-) diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index 2fa789850..4398d9cc1 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -11,16 +11,17 @@ groups: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" audit: | - # For --cert-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - done 2>/dev/null - # For --key-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -36,10 +37,16 @@ groups: - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -55,10 +62,16 @@ groups: text: "Ensure that the --auto-tls argument is not set to true (Manual)" audit: | # Returns 0 if found, 1 if not found - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? 
- done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + fi use_multiple_values: true tests: test_items: @@ -73,16 +86,17 @@ groups: - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" audit: | - # For --peer-cert-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - done 2>/dev/null - # For --peer-key-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -97,10 +111,16 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -116,10 +136,16 @@ groups: text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" audit: | # Returns 0 if found, 1 if not found - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? 
- done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + fi use_multiple_values: true tests: test_items: @@ -134,14 +160,17 @@ groups: - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - done 2>/dev/null - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching file found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml index 88589082a..37b50f033 100644 --- a/cfg/rh-1.0/master.yaml +++ b/cfg/rh-1.0/master.yaml @@ -11,10 +11,18 @@ groups: - id: 1.1.1 text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) - do - oc exec -n openshift-kube-apiserver $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -29,11 +37,18 @@ groups: - id: 1.1.2 text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) - do - oc exec -n openshift-kube-apiserver $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -45,10 +60,18 @@ groups: - id: 1.1.3 text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -63,11 +86,18 @@ groups: - id: 1.1.4 text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) - do - oc exec -n openshift-kube-controller-manager $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -79,10 +109,18 @@ groups: - id: 1.1.5 text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) - do - oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -97,11 +135,18 @@ groups: - id: 1.1.6 text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual))" audit: | - for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) - do - oc exec -n openshift-kube-scheduler $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -113,10 +158,18 @@ groups: - id: 1.1.7 text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual))" audit: | - for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) - do - oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -131,10 +184,18 @@ groups: - id: 1.1.8 text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) - do - oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -146,16 +207,41 @@ groups: - id: 1.1.9 text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') # For CNI multus - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; 2>/dev/null + fi # For SDN pods - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi + # For OVS pods - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \; 2>/dev/null + fi use_multiple_values: true tests: test_items: @@ -170,17 +256,40 @@ groups: - id: 1.1.10 text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') # For CNI multus - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$i %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus $i -- /bin/bash -c "stat -c '$i %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi # For SDN pods - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G"{} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi # For OVS pods in 4.5 - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - # For OVS pods in 4.6 TBD + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \; 2>/dev/null + fi use_multiple_values: true tests: test_items: @@ -192,7 +301,18 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member + fi use_multiple_values: true tests: test_items: @@ -207,7 +327,18 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" audit: | - for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member + fi use_multiple_values: true tests: test_items: @@ -219,10 +350,8 @@ groups: - id: 1.1.13 text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual))" audit: | - for i in $(oc get nodes -o name) - do - oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig - done 2>/dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -237,10 +366,8 @@ groups: - id: 1.1.14 text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" audit: | - for i in $(oc get nodes -o name) - do - oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig - done 2>/dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -252,10 +379,18 @@ groups: - id: 1.1.15 text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name) - do - oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -270,10 +405,18 @@ groups: - id: 1.1.16 text: "Ensure that the scheduler.conf file ownership is set to root:root (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name) - do - oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -285,10 +428,18 @@ groups: - id: 1.1.17 text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -303,10 +454,18 @@ groups: - id: 1.1.18 text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -319,15 +478,22 @@ groups: text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" audit: | # Should return root:root for all files and directories - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - # echo $i static-pod-resources - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # echo $i static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + # echo $i static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; + fi use_multiple_values: true tests: test_items: @@ -339,11 +505,18 @@ groups: - id: 1.1.20 text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi use_multiple_values: true tests: test_items: @@ -358,11 +531,18 @@ groups: - id: 1.1.21 text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" audit: | - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi use_multiple_values: true tests: test_items: @@ -532,11 +712,9 @@ groups: oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' # For OCP 4.5 and earlier verify that authorization-mode is not used - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode - oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null #Check that no overrides are configured oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' audit_config: | diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index 0ea5682ff..fb982d6f3 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -11,11 +11,8 @@ groups: - id: 4.1.1 text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service - done 2> /dev/null - use_multiple_values: true + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null tests: test_items: - flag: "permissions" @@ -30,11 +27,8 @@ groups: text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" audit: | # Should return root:root for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service - done 2> /dev/null - use_multiple_values: true + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null tests: test_items: - flag: root:root @@ -45,11 +39,17 @@ groups: - id: 4.1.3 text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) - do - oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml - done 2> /dev/null - use_multiple_values: true + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null + fi tests: bin_op: or test_items: @@ -65,10 +65,17 @@ groups: - id: 4.1.4 text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" audit: | - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) - do - oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml - done 2> /dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null + fi use_multiple_values: true tests: bin_op: or @@ -82,10 +89,8 @@ groups: text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" audit: | # Check permissions - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -100,10 +105,8 @@ groups: - id: 4.1.6 text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -115,10 +118,8 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -133,10 +134,8 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -148,10 +147,8 @@ groups: - id: 4.1.9 text: "Ensure that the kubelet --config configuration file has permissions set to 644 or 
more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -166,10 +163,8 @@ groups: - id: 4.1.10 text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -184,10 +179,8 @@ groups: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -205,10 +198,8 @@ groups: audit: | POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') TOKEN=$(oc whoami -t) - for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}') - do - oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null use_multiple_values: true tests: test_items: @@ -220,14 +211,12 @@ groups: - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf | awk -F': ' '{ print "clientCAFile=" $2 }' - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: - - flag: clientCAFile="/etc/kubernetes/kubelet-ca.crt" + - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' remediation: | None required. Changing the clientCAFile value is unsupported. scored: true @@ -255,18 +244,13 @@ groups: - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" audit: | - # Should return 1 for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout - echo exit_code=$? 
- done 2>/dev/null - # Should return 1 for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf - echo exit_code=$? - done 2>/dev/null + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null + echo exit_code=$? + # Should return 1 for node + oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null + echo exit_code=$? use_multiple_values: true tests: bin_op: or @@ -291,10 +275,8 @@ groups: - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf; - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null tests: test_items: - flag: protectKernelDefaults @@ -349,10 +331,8 @@ groups: - id: 4.2.9 text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf; - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 type: "manual" @@ -365,7 +345,12 @@ groups: - id: 4.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + oc get configmap config -n openshift-kube-apiserver -o json \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments | + .["kubelet-client-certificate"][0], + .["kubelet-client-key"][0] + ' tests: bin_op: and test_items: @@ -380,15 +365,10 @@ groups: text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" audit: | #Verify the rotateKubeletClientCertificate feature gate is not set to false - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null # Verify the rotateCertificates argument is set to true - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: bin_op: or @@ -411,24 +391,19 @@ groups: text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" audit: | #Verify the rotateKubeletServerCertificate feature gate is 
on - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null # Verify the rotateCertificates argument is set to true - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: bin_op: or test_items: - - flag: RotateKubeletServerCertificate + - flag: rotateCertificates compare: op: eq value: true - - flag: rotateCertificates + - flag: RotateKubeletServerCertificate compare: op: eq value: true diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml index 95de04e4a..e90cd877f 100644 --- a/cfg/rh-1.0/policies.yaml +++ b/cfg/rh-1.0/policies.yaml @@ -78,10 +78,7 @@ groups: text: "Minimize the admission of privileged containers (Manual)" audit: | # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Privileged"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer tests: test_items: - flag: "false" @@ -93,10 +90,7 @@ groups: - id: 5.2.2 text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host PID"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID tests: test_items: - flag: "false" @@ -108,10 +102,7 @@ groups: - id: 5.2.3 text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host IPC"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC tests: test_items: - flag: "false" @@ -123,10 +114,7 @@ groups: - id: 5.2.4 text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host Network"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork tests: test_items: - flag: "false" @@ -138,10 +126,7 @@ groups: - id: 5.2.5 text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation tests: test_items: - flag: "false" @@ -153,18 +138,10 @@ groups: - id: 5.2.6 text: "Minimize the admission of root containers (Manual)" audit: | - # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "Run As User Strategy"; - 
done + # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' + oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type #For SCCs with MustRunAs verify that the range of UIDs does not include 0 - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "\sUID"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax tests: bin_op: or test_items: @@ -183,11 +160,7 @@ groups: text: "Minimize the admission of containers with the NET_RAW capability (Manual)" audit: | # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "Required Drop Capabilities"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities tests: bin_op: or test_items: From 65c484e85aa199d1a23a0254486502ae6a960d9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 18 Apr 2024 09:54:55 +0300 Subject: [PATCH 224/262] build(deps): bump k8s.io/client-go from 0.29.1 to 0.29.3 (#1587) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.29.1 to 0.29.3. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.1...v0.29.3) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: chenk --- go.mod | 10 +++++----- go.sum | 24 ++++++++++-------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index e3ebb00eb..a6cda311f 100644 --- a/go.mod +++ b/go.mod @@ -17,8 +17,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.5.6 gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde - k8s.io/apimachinery v0.29.1 - k8s.io/client-go v0.29.1 + k8s.io/apimachinery v0.29.3 + k8s.io/client-go v0.29.3 ) require ( @@ -41,7 +41,7 @@ require ( github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.4.0 // indirect @@ -84,11 +84,11 @@ require ( golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.1 // indirect + k8s.io/api v0.29.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect diff --git a/go.sum b/go.sum index 1deebe8b8..62f9abca2 100644 --- a/go.sum +++ b/go.sum @@ -69,15 +69,13 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= @@ -265,10 +263,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -292,12 +288,12 @@ gorm.io/driver/postgres v1.5.6 h1:ydr9xEd5YAM0vxVDY0X139dyzNz10spDiDlC7+ibLeU= gorm.io/driver/postgres v1.5.6/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg= gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= -k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= -k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= -k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= -k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= 
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= From ff9341a5d0b1f9cac619c462637270f235b37c0a Mon Sep 17 00:00:00 2001 From: chenk Date: Thu, 18 Apr 2024 09:58:44 +0300 Subject: [PATCH 225/262] release: prepare-v0.7.3 (#1599) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index ecb9375fa..d8afd6562 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.7.2 + image: docker.io/aquasec/kube-bench:v0.7.3 name: kube-bench volumeMounts: - name: var-lib-cni From 2a8615befdafcbc2cf8d8d45440fb40098df2517 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 May 2024 19:35:58 +0300 Subject: [PATCH 226/262] build(deps): bump golang from 1.22.1 to 1.22.2 (#1596) Bumps golang from 1.22.1 to 1.22.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 39885adb5..4a01f53e7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.1 AS build +FROM golang:1.22.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 3fdd4fdcd..12f42b9f8 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.1 AS build +FROM golang:1.22.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 610e483bc..b4bb641ea 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.1 AS build +FROM golang:1.22.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From ed51191d7ce982003959a803544ad36e11eef2ad Mon Sep 17 00:00:00 2001 From: Derek Nola Date: Mon, 20 May 2024 03:47:15 -0700 Subject: [PATCH 227/262] Replace custom k3s etcd script checks with vanilla grep checks (#1601) * Replace custom k3s etcd script checks with vanilla grep checks Signed-off-by: Derek Nola * Rework etcd grep, remove etcd ENV checks (no-op), add correct k3s etcddatadir Signed-off-by: Derek Nola * chore: update go-linter version Signed-off-by: chenk * Use etcddatadir variable Signed-off-by: Derek Nola --------- Signed-off-by: Derek Nola Signed-off-by: chenk Co-authored-by: chenk --- .github/workflows/build.yml | 2 +- cfg/config.yaml | 3 +++ cfg/k3s-cis-1.23/config.yaml | 3 ++- 
cfg/k3s-cis-1.23/etcd.yaml | 25 +++++++------------------ cfg/k3s-cis-1.23/master.yaml | 4 ++-- cfg/k3s-cis-1.24/config.yaml | 2 ++ cfg/k3s-cis-1.24/etcd.yaml | 25 +++++++------------------ cfg/k3s-cis-1.24/master.yaml | 4 ++-- cfg/k3s-cis-1.7/config.yaml | 3 ++- cfg/k3s-cis-1.7/etcd.yaml | 25 +++++++------------------ cfg/k3s-cis-1.7/master.yaml | 4 ++-- 11 files changed, 37 insertions(+), 63 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ca69f8022..6d455a8eb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,7 +34,7 @@ jobs: - name: Setup golangci-lint uses: golangci/golangci-lint-action@v4 with: - version: latest + version: v1.57.2 args: --verbose unit: name: Unit tests diff --git a/cfg/config.yaml b/cfg/config.yaml index 05aeeb477..b26115f37 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -95,6 +95,7 @@ master: datadirs: - /var/lib/etcd/default.etcd - /var/lib/etcd/data.etcd + - /var/lib/rancher/k3s/server/db/etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml @@ -105,6 +106,7 @@ master: - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service - /var/lib/rancher/rke2/server/db/etcd/config + - /var/lib/rancher/k3s/server/db/etcd/config defaultconf: /etc/kubernetes/manifests/etcd.yaml defaultdatadir: /var/lib/etcd/default.etcd @@ -234,6 +236,7 @@ etcd: datadirs: - /var/lib/etcd/default.etcd - /var/lib/etcd/data.etcd + - /var/lib/rancher/k3s/server/db/etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml diff --git a/cfg/k3s-cis-1.23/config.yaml b/cfg/k3s-cis-1.23/config.yaml index 32033c099..d6deb1ce6 100644 --- a/cfg/k3s-cis-1.23/config.yaml +++ b/cfg/k3s-cis-1.23/config.yaml @@ -24,7 +24,8 @@ master: etcd: bins: - containerd - + datadirs: + - /var/lib/rancher/k3s/server/db/etcd node: components: - kubelet diff --git a/cfg/k3s-cis-1.23/etcd.yaml b/cfg/k3s-cis-1.23/etcd.yaml index 1bbb60d43..54cfd0816 100644 --- a/cfg/k3s-cis-1.23/etcd.yaml +++ b/cfg/k3s-cis-1.23/etcd.yaml @@ -10,15 +10,13 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.1" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure TLS encryption. 
@@ -30,14 +28,13 @@ groups: - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.2" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -50,15 +47,13 @@ groups: - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.3" + audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" set: false - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" compare: op: eq value: false @@ -70,15 +65,13 @@ groups: - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.4" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_PEER_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_PEER_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure peer TLS encryption as appropriate @@ -91,14 +84,13 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.5" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -111,15 +103,13 @@ groups: - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.6" + audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" set: false - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" compare: op: eq value: false @@ -132,11 +122,10 @@ groups: - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" - audit: "check_for_k3s_etcd.sh 2.7" + audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" tests: test_items: - flag: "trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" set: true remediation: | [Manual test] diff --git a/cfg/k3s-cis-1.23/master.yaml b/cfg/k3s-cis-1.23/master.yaml index 08d7e7485..a03bca99c 100644 --- a/cfg/k3s-cis-1.23/master.yaml +++ b/cfg/k3s-cis-1.23/master.yaml @@ -155,7 +155,7 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: "check_for_k3s_etcd.sh 1.1.11" + audit: "stat -c %a $etcddatadir" tests: test_items: - flag: "700" @@ -736,7 +736,7 @@ groups: - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 1.2.29" + audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" tests: bin_op: and test_items: diff --git a/cfg/k3s-cis-1.24/config.yaml b/cfg/k3s-cis-1.24/config.yaml index 32033c099..cafe7019a 100644 --- a/cfg/k3s-cis-1.24/config.yaml +++ b/cfg/k3s-cis-1.24/config.yaml @@ -24,6 +24,8 @@ master: etcd: bins: - containerd + datadirs: + - /var/lib/rancher/k3s/server/db/etcd 
node: components: diff --git a/cfg/k3s-cis-1.24/etcd.yaml b/cfg/k3s-cis-1.24/etcd.yaml index d797f56c7..40af92e65 100644 --- a/cfg/k3s-cis-1.24/etcd.yaml +++ b/cfg/k3s-cis-1.24/etcd.yaml @@ -10,15 +10,13 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.1" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure TLS encryption. @@ -30,14 +28,13 @@ groups: - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.2" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -50,15 +47,13 @@ groups: - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.3" + audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" set: false - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" compare: op: eq value: false @@ -70,15 +65,13 @@ groups: - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.4" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_PEER_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_PEER_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure peer TLS encryption as appropriate @@ -91,14 +84,13 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.5" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -111,15 +103,13 @@ groups: - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.6" + audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" set: false - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" compare: op: eq value: false @@ -132,11 +122,10 @@ groups: - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" - audit: "check_for_k3s_etcd.sh 2.7" + audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" tests: test_items: - flag: "trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" set: true remediation: | [Manual test] diff --git a/cfg/k3s-cis-1.24/master.yaml b/cfg/k3s-cis-1.24/master.yaml index ce57bc871..6af44c7a5 100644 --- a/cfg/k3s-cis-1.24/master.yaml +++ b/cfg/k3s-cis-1.24/master.yaml @@ -155,7 +155,7 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: 
"check_for_k3s_etcd.sh 1.1.11" + audit: "stat -c %a $etcddatadir" tests: test_items: - flag: "700" @@ -735,7 +735,7 @@ groups: - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 1.2.29" + audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" tests: bin_op: and test_items: diff --git a/cfg/k3s-cis-1.7/config.yaml b/cfg/k3s-cis-1.7/config.yaml index 816af1e8e..e9574b035 100644 --- a/cfg/k3s-cis-1.7/config.yaml +++ b/cfg/k3s-cis-1.7/config.yaml @@ -31,7 +31,8 @@ master: etcd: bins: - containerd - + datadirs: + - /var/lib/rancher/k3s/server/db/etcd node: components: - kubelet diff --git a/cfg/k3s-cis-1.7/etcd.yaml b/cfg/k3s-cis-1.7/etcd.yaml index 1535ea606..d29818148 100644 --- a/cfg/k3s-cis-1.7/etcd.yaml +++ b/cfg/k3s-cis-1.7/etcd.yaml @@ -10,15 +10,13 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.1" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure TLS encryption. @@ -30,14 +28,13 @@ groups: - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.2" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -50,15 +47,13 @@ groups: - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.3" + audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" set: false - flag: "--auto-tls" - env: "ETCD_AUTO_TLS" compare: op: eq value: false @@ -70,15 +65,13 @@ groups: - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 2.4" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" tests: bin_op: and test_items: - flag: "cert-file" - env: "ETCD_PEER_CERT_FILE" set: true - flag: "key-file" - env: "ETCD_PEER_KEY_FILE" set: true remediation: | Follow the etcd service documentation and configure peer TLS encryption as appropriate @@ -91,14 +84,13 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.5" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" tests: bin_op: or test_items: - flag: "--client-cert-auth" set: true - flag: "client-cert-auth" - env: "ETCD_PEER_CLIENT_CERT_AUTH" compare: op: eq value: true @@ -111,15 +103,13 @@ groups: - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "check_for_k3s_etcd.sh 2.6" + audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" tests: bin_op: or test_items: - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" set: false - flag: "--peer-auto-tls" - env: "ETCD_PEER_AUTO_TLS" compare: op: eq value: false @@ -132,11 +122,10 @@ groups: - 
id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" - audit: "check_for_k3s_etcd.sh 2.7" + audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" tests: test_items: - flag: "trusted-ca-file" - env: "ETCD_TRUSTED_CA_FILE" set: true remediation: | [Manual test] diff --git a/cfg/k3s-cis-1.7/master.yaml b/cfg/k3s-cis-1.7/master.yaml index 8c59d61e6..109b8d84e 100644 --- a/cfg/k3s-cis-1.7/master.yaml +++ b/cfg/k3s-cis-1.7/master.yaml @@ -167,7 +167,7 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: "check_for_k3s_etcd.sh 1.1.11" + audit: "stat -c %a $etcddatadir" tests: test_items: - flag: "700" @@ -738,7 +738,7 @@ groups: - id: 1.2.25 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "check_for_k3s_etcd.sh 1.2.29" + audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: and test_items: From 0f8dfaf1156652b556c913aa8e78fccbb1dfd186 Mon Sep 17 00:00:00 2001 From: Paulo Gomes Date: Wed, 22 May 2024 05:37:36 +0000 Subject: [PATCH 228/262] Statically link binaries and remove debug information (#1615) Signed-off-by: Paulo Gomes --- .goreleaser.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.goreleaser.yml b/.goreleaser.yml index 78d0bb485..cb15102d0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -2,10 +2,15 @@ project_name: kube-bench env: - GO111MODULE=on + - CGO_ENABLED=0 - KUBEBENCH_CFG=/etc/kube-bench/cfg builds: - main: main.go binary: kube-bench + tags: + - osusergo + - netgo + - static_build goos: - linux - darwin @@ -19,6 +24,9 @@ builds: - 6 - 7 ldflags: + - "-s" + - "-w" + - "-extldflags '-static'" - "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion={{.Version}}" - "-X github.com/aquasecurity/kube-bench/cmd.cfgDir={{.Env.KUBEBENCH_CFG}}" # Archive customization From d8fc37649a239dc5e0ec22d73955aa712e1267bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 May 2024 17:28:56 +0300 Subject: [PATCH 229/262] build(deps): bump alpine from 3.19.1 to 3.20.0 (#1621) Bumps alpine from 3.19.1 to 3.20.0. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 4a01f53e7..fb77e0493 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.19.1 AS run +FROM alpine:3.20.0 AS run WORKDIR /opt/kube-bench/ # add GNU ps for -C, -o cmd, and --no-headers support # https://github.com/aquasecurity/kube-bench/issues/109 From 7027b6b2ec63686bd1fb4270485cd9d1e651a6d3 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Wed, 26 Jun 2024 08:53:57 -0400 Subject: [PATCH 230/262] Add CIS kubernetes CIS-1.9 for k8s v1.27 - v1.29 (#1617) * Create cis-1.9 yamls and Update info - policies.yaml - 5.1.1 to 5.1.6 were adapted from Manual to Automated - 5.1.3 got broken down into 5.1.3.1 and 5.1.3.2 - 5.1.6 got broken down into 5.1.6.1 and 5.1.6.2 - version was set to cis-1.9 - node.yaml master.yaml controlplane.yaml etcd.yaml - version was set to cis-1.9 * Adapt master.yaml - Expand 1.1.13/1.1.14 checks by adding super-admin.conf to the permission and ownership verification - Remove 1.2.12 Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual) - Adjust numbering from 1.2.12 to 1.2.29 * Adjust policies.yaml - Check 5.2.3 to 5.2.9 Title Automated to Manual * Append node.yaml - Create 4.3 kube-config group - Create 4.3.1 Ensure that the kube-proxy metrics service is bound to localhost (Automated) * Adjust policies 5.1.3 and 5.1.6 - Merge 5.1.3.1 and 5.1.3.2 into 5.1.3 (use role_is_compliant and clusterrole_is_compliant) - Remove 5.1.6.1 and promote 5.1.6.2 to 5.1.6 since it natively covered 5.1.6.1 artifacts * Add kubectl dependency and update publish - Download kubectl (build stage) based on version and architecture - Add binary checksum verification - Use go env GOARCH for ARCH --- .github/workflows/publish.yml | 9 +- Dockerfile | 9 + Dockerfile.fips.ubi | 9 + Dockerfile.ubi | 9 + cfg/cis-1.9/config.yaml | 2 + cfg/cis-1.9/controlplane.yaml | 60 +++ cfg/cis-1.9/etcd.yaml | 135 +++++ cfg/cis-1.9/master.yaml | 919 ++++++++++++++++++++++++++++++++++ cfg/cis-1.9/node.yaml | 478 ++++++++++++++++++ cfg/cis-1.9/policies.yaml | 372 ++++++++++++++ cfg/config.yaml | 9 + cmd/common_test.go | 3 + docs/architecture.md | 1 + docs/platforms.md | 1 + makefile | 18 +- 15 files changed, 2027 insertions(+), 7 deletions(-) create mode 100644 cfg/cis-1.9/config.yaml create mode 100644 cfg/cis-1.9/controlplane.yaml create mode 100644 cfg/cis-1.9/etcd.yaml create mode 100644 cfg/cis-1.9/master.yaml create mode 100644 cfg/cis-1.9/node.yaml create mode 100644 cfg/cis-1.9/policies.yaml diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 681ae0a02..027941c5e 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,6 +9,7 @@ env: ALIAS: aquasecurity DOCKERHUB_ALIAS: aquasec REP: kube-bench + jobs: publish: name: Publish @@ -46,7 +47,10 @@ jobs: images: ${{ env.REP }} tag-semver: | {{version}} - + - name: Extract variables from makefile (kubectl) + id: extract_vars + run: | + echo "KUBECTL_VERSION=$(grep -oP '^KUBECTL_VERSION\s*\?=\s*\K.*' makefile)" >> $GITHUB_ENV - name: Build and push - Docker/ECR id: docker_build uses: docker/build-push-action@v5 @@ -57,6 +61,7 @@ jobs: push: true build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + 
KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} @@ -76,6 +81,7 @@ jobs: file: Dockerfile.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi @@ -95,6 +101,7 @@ jobs: file: Dockerfile.fips.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips diff --git a/Dockerfile b/Dockerfile index fb77e0493..22032822b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,6 +9,14 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench +# Add kubectl to run policies checks +ARG KUBECTL_VERSION TARGETARCH +RUN wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" +RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256" +# Verify kubectl sha256sum +RUN /bin/bash -c 'echo "$( + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-client-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
+ --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml new file mode 100644 index 000000000..ad1423eb5 --- /dev/null +++ b/cfg/cis-1.9/master.yaml @@ -0,0 +1,919 @@ +--- +controls: +version: "cis-1.9" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example,
+ chown root:root
+ scored: false
+
+ - id: 1.1.11
+ text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
+ audit: |
+ DATA_DIR=''
+ for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
+ if test -d "$d"; then DATA_DIR="$d"; fi
+ done
+ if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+ stat -c permissions=%a "$DATA_DIR"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "700"
+ remediation: |
+ On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+ from the command 'ps -ef | grep etcd'.
+ Run the below command (based on the etcd data directory found above). For example,
+ chmod 700 /var/lib/etcd
+ scored: true
+
+ - id: 1.1.12
+ text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
+ audit: |
+ DATA_DIR=''
+ for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
+ if test -d "$d"; then DATA_DIR="$d"; fi
+ done
+ if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+ stat -c %U:%G "$DATA_DIR"
+ tests:
+ test_items:
+ - flag: "etcd:etcd"
+ remediation: |
+ On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+ from the command 'ps -ef | grep etcd'.
+ Run the below command (based on the etcd data directory found above).
+ For example, chown etcd:etcd /var/lib/etcd
+ scored: true
+
+ - id: 1.1.13
+ text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)"
+ audit: |
+ for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Run the below command (based on the file location on your system) on the control plane node.
+ For example, chmod 600 /etc/kubernetes/admin.conf
+ On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
+ For example, chmod 600 /etc/kubernetes/super-admin.conf
+ scored: true
+
+ - id: 1.1.14
+ text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)"
+ audit: |
+ for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done
+ use_multiple_values: true
+ tests:
+ test_items:
+ - flag: "ownership"
+ compare:
+ op: eq
+ value: "root:root"
+ remediation: |
+ Run the below command (based on the file location on your system) on the control plane node.
+ For example, chown root:root /etc/kubernetes/admin.conf
+ On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present.
+ For example, chown root:root /etc/kubernetes/super-admin.conf
+ scored: true
+
+ - id: 1.1.15
+ text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
+ audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Run the below command (based on the file location on your system) on the control plane node.
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false
+ scored: false
+
+ - id: 1.2.2
+ text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--token-auth-file"
+ set: false
+ remediation: |
+ Follow the documentation and configure alternate mechanisms for authentication. Then,
+ edit the API server pod specification file $apiserverconf
+ on the control plane node and remove the --token-auth-file= parameter.
+ scored: true
+
+ - id: 1.2.3
+ text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--enable-admission-plugins"
+ compare:
+ op: have
+ value: "DenyServiceExternalIPs"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and add `DenyServiceExternalIPs` to
+ the enabled admission plugins.
+ scored: false
+
+ - id: 1.2.4
+ text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--kubelet-client-certificate"
+ - flag: "--kubelet-client-key"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection between the
+ apiserver and kubelets. Then, edit API server pod specification file
+ $apiserverconf on the control plane node and set the
+ kubelet client certificate and key parameters as below.
+ --kubelet-client-certificate=
+ --kubelet-client-key=
+ scored: true
+
+ - id: 1.2.5
+ text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--kubelet-certificate-authority"
+ remediation: |
+ Follow the Kubernetes documentation and setup the TLS connection between
+ the apiserver and kubelets. Then, edit the API server pod specification file
+ $apiserverconf on the control plane node and set the
+ --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+ --kubelet-certificate-authority=
+ scored: true
+
+ - id: 1.2.6
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: nothave
+ value: "AlwaysAllow"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+ One such example could be as below.
+ --authorization-mode=RBAC
+ scored: true
+
+ - id: 1.2.7
+ text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: has
+ value: "Node"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+ --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
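+ As an illustrative sketch only (the plugin named here is a placeholder, not a recommendation),
+ a compliant configuration either omits the flag entirely or lists only plugins other than
+ NamespaceLifecycle, for example
+ --disable-admission-plugins=PodNodeSelector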
+ scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.15 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.16 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.20 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. 
+ For example, --request-timeout=300s + scored: false + + - id: 1.2.21 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.22 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.23 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.24 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. 
+ --etcd-cafile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.28 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.29 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. 
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf file + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/cis-1.9/node.yaml b/cfg/cis-1.9/node.yaml new file mode 100644 index 000000000..bd7cc1f68 --- /dev/null +++ b/cfg/cis-1.9/node.yaml @@ -0,0 +1,478 @@ +--- +controls: +version: "cis-1.9" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false + + - id: 4.3 + text: "kube-proxy" + checks: + - id: 4.3.1 + text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)" + audit: "/bin/ps -fC $proxybin" + audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + compare: + op: has + value: "127.0.0.1" + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + set: false + remediation: | + Modify or remove any values which bind the metrics service to a non-localhost address. + The default value is 127.0.0.1:10249. 
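+ A minimal sketch, assuming kube-proxy is configured through a KubeProxyConfiguration file
+ (field name taken from the kubeproxy.config.k8s.io API); the value shown is the default:
+ metricsBindAddress: 127.0.0.1:10249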
+ scored: true diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml new file mode 100644 index 000000000..e99c42c98 --- /dev/null +++ b/cfg/cis-1.9/policies.yaml @@ -0,0 +1,372 @@ +--- +controls: +version: "cis-1.9" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + audit: | + kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject + do + if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then + is_compliant="false" + else + is_compliant="true" + fi; + echo "**role_name: ${role_name} role_binding: ${rolebinding} subject: ${subject} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name] + Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin. + scored: true + - id: 5.1.2 + text: "Minimize access to secrets (Automated)" + audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\"" + tests: + test_items: + - flag: "canGetListWatchSecretsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: true + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + audit: | + # Check Roles + kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name + do + role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules') + if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then + role_is_compliant="false" + else + role_is_compliant="true" + fi; + echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}" + done + + # Check ClusterRoles + kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name + do + clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules') + if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then + clusterrole_is_compliant="false" + else + clusterrole_is_compliant="true" + fi; + echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}" + done + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "role_is_compliant" + compare: + op: eq + value: true + set: true + - flag: "clusterrole_is_compliant" + compare: + op: eq + value: true + set: true + remediation: | + Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific + objects or actions. + Condition: role_is_compliant is false if ["*"] is found in rules. 
+ Condition: clusterrole_is_compliant is false if ["*"] is found in rules. + scored: true + - id: 5.1.4 + text: "Minimize access to create pods (Automated)" + audit: | + echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)" + tests: + test_items: + - flag: "canCreatePodsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: true + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Automated)" + audit: | + kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1 + use_multiple_values: true + tests: + test_items: + - flag: "automountServiceAccountToken" + compare: + op: eq + value: false + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + `automountServiceAccountToken: false`. + scored: true + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + audit: | + kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken + do + # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or . + svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n ${pod_namespace} ${pod_service_account} -o json | jq -r '.automountServiceAccountToken' | sed -e 's//notset/g' -e 's/null/notset/g') + pod_is_automountserviceaccounttoken=$(echo ${pod_is_automountserviceaccounttoken} | sed -e 's//notset/g' -e 's/null/notset/g') + if [[ "${svacc_is_automountserviceaccounttoken}" == "false" && ( "${pod_is_automountserviceaccounttoken}" == "false" || "${pod_is_automountserviceaccounttoken}" == "notset" ) ]]; then + is_compliant="true" + elif [[ "${svacc_is_automountserviceaccounttoken}" == "true" && "${pod_is_automountserviceaccounttoken}" == "false" ]]; then + is_compliant="true" + else + is_compliant="false" + fi + echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Modify the definition of ServiceAccounts and Pods which do not need to mount service + account tokens to disable it, with `automountServiceAccountToken: false`. + If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence. 
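A rough illustration of the remediation for 5.1.5 and 5.1.6 (all names below are placeholders, not values required by the benchmark): a ServiceAccount that disables token automounting, and a Pod that opts out explicitly as well, since the Pod-level field takes precedence.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-sa                            # placeholder name
  namespace: demo                         # placeholder namespace
automountServiceAccountToken: false
---
apiVersion: v1
kind: Pod
metadata:
  name: app
  namespace: demo
spec:
  serviceAccountName: app-sa
  automountServiceAccountToken: false     # Pod spec value overrides the ServiceAccount setting
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image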
+ Condition: Pod is_compliant to true when + - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset + - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false + scored: true + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. 
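One way, though not the only one, to satisfy the "at least one active policy control mechanism" requirement of 5.2.1 for the 5.2.x checks is the built-in Pod Security Admission controller. A sketch with a placeholder namespace name, enforcing the restricted profile, which rejects privileged containers and pods sharing the host PID, IPC or network namespaces:

apiVersion: v1
kind: Namespace
metadata:
  name: user-workloads                              # placeholder namespace
  labels:
    pod-security.kubernetes.io/enforce: restricted  # reject non-compliant pods at admission
    pod-security.kubernetes.io/warn: restricted     # also surface warnings on apply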
+ scored: false + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilites in applications running on your cluster. Where a namespace + contains applicaions which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. 
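Two short sketches tying 5.3.2 and 5.4.1 to concrete objects (all names are placeholders): a default-deny NetworkPolicy that each namespace can start from, and a Pod that reads a Secret from a mounted file rather than from environment variables.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all          # placeholder name
  namespace: demo                 # placeholder namespace
spec:
  podSelector: {}                 # empty selector matches every pod in the namespace
  policyTypes: ["Ingress", "Egress"]
---
apiVersion: v1
kind: Pod
metadata:
  name: app
  namespace: demo
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest   # placeholder image
      volumeMounts:
        - name: db-creds
          mountPath: /etc/secrets               # application reads credentials from files here
          readOnly: true
  volumes:
    - name: db-creds
      secret:
        secretName: db-creds                    # placeholder Secret name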
+ scored: false + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index b26115f37..9ccd9e0c0 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -281,6 +281,9 @@ version_mapping: "1.24": "cis-1.24" "1.25": "cis-1.7" "1.26": "cis-1.8" + "1.27": "cis-1.9" + "1.28": "cis-1.9" + "1.29": "cis-1.9" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "eks-1.2.0": "eks-1.2.0" @@ -359,6 +362,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.9": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" diff --git a/cmd/common_test.go b/cmd/common_test.go index 3627c9865..53793a000 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -242,6 +242,9 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, {kubeVersion: "1.26", succeed: true, exp: "cis-1.8"}, + {kubeVersion: "1.27", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.28", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.29", succeed: true, exp: "cis-1.9"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, diff --git a/docs/architecture.md b/docs/architecture.md index 09d081e30..ef17ca6f6 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -22,6 +22,7 @@ The following table shows the valid targets based on the CIS Benchmark version. 
| cis-1.24 | master, controlplane, node, etcd, policies | | cis-1.7 | master, controlplane, node, etcd, policies | | cis-1.8 | master, controlplane, node, etcd, policies | +| cis-1.9 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index 92b6c7d10..e907609ad 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -17,6 +17,7 @@ Some defined by other hardenening guides. | CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | | CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | | CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 | +| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 | | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | diff --git a/makefile b/makefile index f9c91fc01..a0e01fb51 100644 --- a/makefile +++ b/makefile @@ -11,6 +11,8 @@ uname := $(shell uname -s) BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ +KUBECTL_VERSION ?= 1.28.7 +ARCH ?= $(shell go env GOARCH) ifneq ($(findstring Microsoft,$(shell uname -r)),) BUILD_OS := windows @@ -45,15 +47,19 @@ build-fips: # builds the current dev docker version build-docker: docker build --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --build-arg VCS_REF=$(VERSION) \ - --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ - -t $(IMAGE_NAME) . + --build-arg VCS_REF=$(VERSION) \ + --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ + --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ + --build-arg TARGETARCH=$(ARCH) \ + -t $(IMAGE_NAME) . build-docker-ubi: docker build -f Dockerfile.ubi --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --build-arg VCS_REF=$(VERSION) \ - --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ - -t $(IMAGE_NAME_UBI) . + --build-arg VCS_REF=$(VERSION) \ + --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ + --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ + --build-arg TARGETARCH=$(ARCH) \ + -t $(IMAGE_NAME_UBI) . # unit tests tests: From 871027447f4350e7d98b55c936afabd4611610d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 29 Jun 2024 15:53:49 +0300 Subject: [PATCH 231/262] build(deps): bump goreleaser/goreleaser-action from 5 to 6 (#1628) Bumps [goreleaser/goreleaser-action](https://github.com/goreleaser/goreleaser-action) from 5 to 6. - [Release notes](https://github.com/goreleaser/goreleaser-action/releases) - [Commits](https://github.com/goreleaser/goreleaser-action/compare/v5...v6) --- updated-dependencies: - dependency-name: goreleaser/goreleaser-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6d455a8eb..d13520197 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -95,7 +95,7 @@ jobs: with: fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: v1.7.0 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4fc523f6f..b7ebe034d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -44,7 +44,7 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: v1.7.0 From 366e79ddda6b33d53e25d7a87236661ae9107496 Mon Sep 17 00:00:00 2001 From: chenk Date: Tue, 2 Jul 2024 10:35:09 +0300 Subject: [PATCH 232/262] release: prepare v0.8.0 (#1639) Signed-off-by: chenk --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index d8afd6562..6c7a61bea 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.7.3 + image: docker.io/aquasec/kube-bench:v0.8.0 name: kube-bench volumeMounts: - name: var-lib-cni From 5a3fd1d896a9b88c3ce992c4d07e4b40a1ef5964 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Jul 2024 08:46:34 +0300 Subject: [PATCH 233/262] build(deps): bump golang from 1.22.2 to 1.22.4 (#1629) Bumps golang from 1.22.2 to 1.22.4. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 22032822b..67aadfd64 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.2 AS build +FROM golang:1.22.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index e9ce48fa7..1e2d6ab15 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.2 AS build +FROM golang:1.22.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index b81b51809..ae4049f23 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.2 AS build +FROM golang:1.22.4 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From c533d68bad67be657289b7f47821f33a999361bb Mon Sep 17 00:00:00 2001 From: Saurabh Misra <45711974+sm171190@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:26:58 +0530 Subject: [PATCH 234/262] FIXING RKE-2-CIS-1.24 Checks (#1688) MASTER: Checks 1.1.10,1.1.20 are manual NODE: a. Check 4.2.12 is the node-level equivalent of the master-level check 1.3.6 and is treated the same way. 
--- cfg/rke2-cis-1.24/master.yaml | 19 ++++++++++++++++--- cfg/rke2-cis-1.24/node.yaml | 2 +- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/cfg/rke2-cis-1.24/master.yaml b/cfg/rke2-cis-1.24/master.yaml index 13afa29ea..a11048d14 100644 --- a/cfg/rke2-cis-1.24/master.yaml +++ b/cfg/rke2-cis-1.24/master.yaml @@ -148,12 +148,18 @@ groups: - id: 1.1.10 text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" audit: | - ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + '/bin/sh -c "if [[ -e /etc/cni/net.d ]]; then + ps -fC "${kubeletbin:-kubelet}" | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + else + echo "File not found" + fi' use_multiple_values: true tests: + bin_op: or test_items: - flag: "root:root" + - flag: "File not found" remediation: | Run the below command (based on the file location on your system) on the control plane node. For example, @@ -321,11 +327,18 @@ groups: - id: 1.1.21 text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key" + audit: | + '/bin/sh -c if test -e "/var/lib/rancher/rke2/server/tls/*.key"; then + stat -c "%a" "/var/lib/rancher/rke2/server/tls/*.key" + else + echo "File not found" + fi' use_multiple_values: true tests: + bin_op: or test_items: - flag: "permissions" + - flag: "File not found" compare: op: eq value: "600" @@ -979,7 +992,7 @@ groups: Edit the Controller Manager pod specification file $controllermanagerconf on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. 
--feature-gates=RotateKubeletServerCertificate=true - scored: true + scored: false type: skip - id: 1.3.7 diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml index b99703fc4..8ecce99c0 100644 --- a/cfg/rke2-cis-1.24/node.yaml +++ b/cfg/rke2-cis-1.24/node.yaml @@ -440,7 +440,7 @@ groups: systemctl daemon-reload systemctl restart kubelet.service scored: false - + type: skip - id: 4.2.13 text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" audit: "/bin/ps -fC $kubeletbin" From f8b6f2fc19565e1c9b33f2548e9e7a75dc195488 Mon Sep 17 00:00:00 2001 From: mjshastha <61929310+mjshastha@users.noreply.github.com> Date: Tue, 24 Sep 2024 11:42:40 +0530 Subject: [PATCH 235/262] chore: fixed vulns - bump Go version (#1687) --- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- go.mod | 10 +++++----- go.sum | 16 ++++++++-------- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d13520197..4d344b9b2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ on: - "LICENSE" - "NOTICE" env: - GO_VERSION: "1.21" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index b7ebe034d..84ea40db7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,7 +5,7 @@ on: tags: - "v*" env: - GO_VERSION: "1.21" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" diff --git a/Dockerfile b/Dockerfile index 67aadfd64..eacd2532a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.4 AS build +FROM golang:1.22.7 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 1e2d6ab15..91c86b30e 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.4 AS build +FROM golang:1.22.7 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index ae4049f23..074f7b2ef 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.4 AS build +FROM golang:1.22.7 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/go.mod b/go.mod index a6cda311f..e0a3c5776 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/aquasecurity/kube-bench -go 1.21 +go 1.22 require ( github.com/aws/aws-sdk-go-v2 v1.26.0 @@ -74,13 +74,13 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.17.0 // indirect + golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.19.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect 
golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 62f9abca2..979defc02 100644 --- a/go.sum +++ b/go.sum @@ -199,8 +199,8 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -212,8 +212,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -233,10 +233,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= From a9422a6623f61bd4440cac189c3aa2a426a8f75d Mon Sep 17 00:00:00 2001 From: Derek 
Nola Date: Wed, 25 Sep 2024 00:12:02 -0700 Subject: [PATCH 236/262] Overhaul of K3s scans (#1659) * Overhaul K3s 1.X checks Signed-off-by: Derek Nola * Overhaul K3s 2.X Checks Signed-off-by: Derek Nola * Overhaul K3s 4.X checks Signed-off-by: Derek Nola * Overhaul K3s 5.X checks Signed-off-by: Derek Nola * Add K3s cis-1.8 scan Signed-off-by: Derek Nola * Fix K3s 1.1.10 check Signed-off-by: Derek Nola * Merge journalctl checks for K3s Signed-off-by: Derek Nola * Matched Manual/Automated to correct scoring (false/true) Signed-off-by: Derek Nola * Remove incorrect use of check_for_default_sa.sh script Signed-off-by: Derek Nola --------- Signed-off-by: Derek Nola Co-authored-by: afdesk --- cfg/config.yaml | 8 +- cfg/k3s-cis-1.23/controlplane.yaml | 2 +- cfg/k3s-cis-1.23/master.yaml | 72 +-- cfg/k3s-cis-1.23/node.yaml | 16 +- cfg/k3s-cis-1.24/config.yaml | 54 +- cfg/k3s-cis-1.24/controlplane.yaml | 2 +- cfg/k3s-cis-1.24/etcd.yaml | 141 +++-- cfg/k3s-cis-1.24/master.yaml | 545 ++++++++-------- cfg/k3s-cis-1.24/node.yaml | 292 ++++----- cfg/k3s-cis-1.24/policies.yaml | 4 +- cfg/k3s-cis-1.7/config.yaml | 52 +- cfg/k3s-cis-1.7/controlplane.yaml | 2 +- cfg/k3s-cis-1.7/etcd.yaml | 141 +++-- cfg/k3s-cis-1.7/master.yaml | 554 ++++++++-------- cfg/k3s-cis-1.7/node.yaml | 276 ++++---- cfg/k3s-cis-1.7/policies.yaml | 26 +- cfg/k3s-cis-1.8/config.yaml | 54 ++ cfg/k3s-cis-1.8/controlplane.yaml | 62 ++ cfg/k3s-cis-1.8/etcd.yaml | 144 +++++ cfg/k3s-cis-1.8/master.yaml | 985 +++++++++++++++++++++++++++++ cfg/k3s-cis-1.8/node.yaml | 422 ++++++++++++ cfg/k3s-cis-1.8/policies.yaml | 300 +++++++++ cfg/rke-cis-1.23/policies.yaml | 12 +- cfg/rke-cis-1.24/policies.yaml | 8 - cfg/rke-cis-1.7/policies.yaml | 11 +- 25 files changed, 3056 insertions(+), 1129 deletions(-) create mode 100644 cfg/k3s-cis-1.8/config.yaml create mode 100644 cfg/k3s-cis-1.8/controlplane.yaml create mode 100644 cfg/k3s-cis-1.8/etcd.yaml create mode 100644 cfg/k3s-cis-1.8/master.yaml create mode 100644 cfg/k3s-cis-1.8/node.yaml create mode 100644 cfg/k3s-cis-1.8/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 9ccd9e0c0..cdd3fb8d1 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -164,7 +164,6 @@ node: - "/var/snap/microk8s/current/credentials/kubelet.config" - "/etc/kubernetes/kubeconfig-kubelet" - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" - - "/var/lib/rancher/k3s/server/cred/admin.kubeconfig" - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" confs: - "/etc/kubernetes/kubelet-config.yaml" @@ -190,7 +189,6 @@ node: - "/etc/systemd/system/snap.kubelet.daemon.service" - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" - "/etc/kubernetes/kubelet.yaml" - - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" defaultconf: "/var/lib/kubelet/config.yaml" defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" @@ -438,6 +436,12 @@ target_mapping: - "controlplane" - "node" - "policies" + "k3s-cis-1.8": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" "k3s-cis-1.23": - "master" - "etcd" diff --git a/cfg/k3s-cis-1.23/controlplane.yaml b/cfg/k3s-cis-1.23/controlplane.yaml index 1aec579e5..aeee0a77c 100644 --- a/cfg/k3s-cis-1.23/controlplane.yaml +++ b/cfg/k3s-cis-1.23/controlplane.yaml @@ -21,7 +21,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 
'audit-policy-file'" type: "manual" tests: test_items: diff --git a/cfg/k3s-cis-1.23/master.yaml b/cfg/k3s-cis-1.23/master.yaml index a03bca99c..0209dc73e 100644 --- a/cfg/k3s-cis-1.23/master.yaml +++ b/cfg/k3s-cis-1.23/master.yaml @@ -323,7 +323,7 @@ groups: checks: - id: 1.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" type: manual tests: test_items: @@ -371,7 +371,7 @@ groups: - id: 1.2.4 text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" type: "skip" tests: bin_op: or @@ -389,7 +389,7 @@ groups: - id: 1.2.5 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: bin_op: and test_items: @@ -406,7 +406,7 @@ groups: - id: 1.2.6 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: test_items: - flag: "--kubelet-certificate-authority" @@ -420,7 +420,7 @@ groups: - id: 1.2.7 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -436,7 +436,7 @@ groups: - id: 1.2.8 text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -451,7 +451,7 @@ groups: - id: 1.2.9 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -466,7 +466,7 @@ groups: - id: 1.2.10 text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -483,7 +483,7 @@ groups: - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysAdmit is 
not set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -517,7 +517,7 @@ groups: - id: 1.2.13 text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -538,7 +538,7 @@ groups: - id: 1.2.14 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -557,7 +557,7 @@ groups: - id: 1.2.15 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -575,7 +575,7 @@ groups: - id: 1.2.16 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -592,7 +592,7 @@ groups: - id: 1.2.17 text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" tests: bin_op: or test_items: @@ -610,7 +610,7 @@ groups: - id: 1.2.18 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -625,7 +625,7 @@ groups: - id: 1.2.19 text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -639,7 +639,7 @@ groups: - id: 1.2.20 text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -656,7 +656,7 @@ groups: - id: 1.2.21 text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running 
kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -673,7 +673,7 @@ groups: - id: 1.2.22 text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -689,7 +689,7 @@ groups: - id: 1.2.23 text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -702,7 +702,7 @@ groups: - id: 1.2.24 text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -722,7 +722,7 @@ groups: - id: 1.2.25 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -736,7 +736,7 @@ groups: - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" + audit: "journalctl -m -u k3s | grep -m1 'Running kube-apiserver'" tests: bin_op: and test_items: @@ -754,7 +754,7 @@ groups: - id: 1.2.27 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" tests: bin_op: and test_items: @@ -772,7 +772,7 @@ groups: - id: 1.2.28 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" tests: test_items: - flag: "--client-ca-file" @@ -785,7 +785,7 @@ groups: - id: 1.2.29 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" tests: test_items: - flag: "--etcd-cafile" @@ -798,7 +798,7 @@ groups: - id: 1.2.30 text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" tests: test_items: - flag: "--encryption-provider-config" @@ -820,7 +820,7 @@ groups: - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "journalctl -D 
/var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" tests: test_items: - flag: "--tls-cipher-suites" @@ -845,7 +845,7 @@ groups: checks: - id: 1.3.1 text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" tests: test_items: - flag: "--terminated-pod-gc-threshold" @@ -857,7 +857,7 @@ groups: - id: 1.3.2 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -872,7 +872,7 @@ groups: - id: 1.3.3 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" tests: test_items: - flag: "--use-service-account-credentials" @@ -887,7 +887,7 @@ groups: - id: 1.3.4 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" tests: test_items: - flag: "--service-account-private-key-file" @@ -900,7 +900,7 @@ groups: - id: 1.3.5 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" tests: test_items: - flag: "--root-ca-file" @@ -912,7 +912,7 @@ groups: - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" type: "skip" tests: bin_op: or @@ -953,7 +953,7 @@ groups: checks: - id: 1.4.1 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1" tests: test_items: - flag: "--profiling" @@ -969,7 +969,7 @@ groups: - id: 1.4.2 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" tests: bin_op: or test_items: diff --git 
a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml index c0b60dfe2..bedfe928a 100644 --- a/cfg/k3s-cis-1.23/node.yaml +++ b/cfg/k3s-cis-1.23/node.yaml @@ -186,7 +186,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -209,7 +209,7 @@ groups: - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' tests: test_items: - flag: --authorization-mode @@ -231,7 +231,7 @@ groups: - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file @@ -251,7 +251,7 @@ groups: - id: 4.2.4 text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " tests: bin_op: or test_items: @@ -276,7 +276,7 @@ groups: - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -302,7 +302,7 @@ groups: - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" type: 
"skip" tests: test_items: @@ -325,7 +325,7 @@ groups: - id: 4.2.7 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" type: "skip" tests: test_items: @@ -393,7 +393,7 @@ groups: - id: 4.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --tls-cert-file diff --git a/cfg/k3s-cis-1.24/config.yaml b/cfg/k3s-cis-1.24/config.yaml index cafe7019a..4adbac3a1 100644 --- a/cfg/k3s-cis-1.24/config.yaml +++ b/cfg/k3s-cis-1.24/config.yaml @@ -16,33 +16,43 @@ master: scheduler: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig controllermanager: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + etcd: bins: - containerd - datadirs: - - /var/lib/rancher/k3s/server/db/etcd - - node: - components: - - kubelet - - proxy - - kubelet: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig - defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt - - proxy: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - - policies: - components: - - policies + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.24/controlplane.yaml b/cfg/k3s-cis-1.24/controlplane.yaml index fd9766efb..471017bd7 100644 --- a/cfg/k3s-cis-1.24/controlplane.yaml +++ b/cfg/k3s-cis-1.24/controlplane.yaml @@ -21,7 +21,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" tests: test_items: - flag: "--audit-policy-file" diff --git a/cfg/k3s-cis-1.24/etcd.yaml b/cfg/k3s-cis-1.24/etcd.yaml index 40af92e65..cb57781c0 100644 --- a/cfg/k3s-cis-1.24/etcd.yaml +++ b/cfg/k3s-cis-1.24/etcd.yaml @@ -10,128 +10,135 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" remediation: | - Follow the etcd service documentation and configure TLS encryption. 
- Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files. + scored: false - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.client-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--auto-tls" - set: false - - flag: "--auto-tls" + - path: "{.client-transport-security.auto-tls}" compare: op: eq value: false + - path: "{.client-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true + client-transport-security: + auto-tls: false + scored: false - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. 
+ If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files. + scored: false - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.peer-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true + If running with sqlite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable peer client certificate authentication. + scored: false - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--peer-auto-tls" - set: false - - flag: "--peer-auto-tls" + - path: "{.peer-transport-security.auto-tls}" compare: op: eq value: false - set: true + - path: "{.peer-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running with sqlite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true + peer-transport-security: + auto-tls: false + scored: false - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" - audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: test_items: - - flag: "trusted-ca-file" - set: true + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt" remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= + If running with sqlite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates a unique certificate authority for etcd. + This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use a shared certificate authority. scored: false diff --git a/cfg/k3s-cis-1.24/master.yaml b/cfg/k3s-cis-1.24/master.yaml index 6af44c7a5..5ff0318c9 100644 --- a/cfg/k3s-cis-1.24/master.yaml +++ b/cfg/k3s-cis-1.24/master.yaml @@ -19,9 +19,8 @@ groups: op: bitmask value: "644" remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 644 $apiserverconf + Not Applicable. + By default, K3s embeds the api server within the k3s process.
There is no API server pod specification file. scored: true - id: 1.1.2 @@ -32,8 +31,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. scored: true - id: 1.1.3 @@ -47,8 +46,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.4 @@ -59,8 +58,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.5 @@ -74,8 +73,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.6 @@ -86,8 +85,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.7 @@ -101,9 +100,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.8 @@ -114,17 +112,14 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $etcdconf + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.9 text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a type: "skip" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a use_multiple_values: true tests: test_items: @@ -133,36 +128,37 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. 
scored: false - id: 1.1.10 text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G\ type: "skip" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G use_multiple_values: true tests: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. + scored: true - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: "stat -c %a $etcddatadir" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi tests: test_items: - - flag: "700" + - flag: "permissions" compare: - op: eq + op: bitmask value: "700" - set: true remediation: | On the etcd server node, get the etcd data directory, passed as an argument --data-dir, from the command 'ps -ef | grep etcd'. @@ -178,15 +174,14 @@ groups: test_items: - flag: "etcd:etcd" remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. scored: true - id: 1.1.13 text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" type: "skip" tests: test_items: @@ -212,12 +207,12 @@ groups: set: true remediation: | Run the below command (based on the file location on your system) on the control plane node. 
- For example, chown root:root /etc/kubernetes/admin.conf + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig scored: true - id: 1.1.15 text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -232,7 +227,7 @@ groups: - id: 1.1.16 text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -244,7 +239,7 @@ groups: - id: 1.1.17 text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -287,8 +282,8 @@ groups: scored: true - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" use_multiple_values: true tests: test_items: @@ -297,14 +292,14 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. For example, - chmod -R 600 /etc/kubernetes/pki/*.crt + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt scored: false - id: 1.1.21 text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" use_multiple_values: true tests: test_items: @@ -313,17 +308,17 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. 
For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true - id: 1.2 text: "API Server" checks: - id: 1.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" tests: test_items: - flag: "--anonymous-auth" @@ -331,27 +326,29 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false + By default, K3s sets the --anonymous-auth argument to false. If it is set to true, + edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "anonymous-auth=true" + scored: true - id: 1.2.2 text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--token-auth-file" set: false remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" scored: true - id: 1.2.3 text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -359,18 +356,18 @@ groups: compare: op: nothave value: "DenyServiceExternalIPs" - set: true - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" scored: true - id: 1.2.4 text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" type: "skip" tests: bin_op: or @@ -388,24 +385,25 @@ groups: - id: 1.2.5 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: bin_op: and test_items: - flag: "--kubelet-client-certificate" - flag: "--kubelet-client-key" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. 
Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= + By default, K3s automatically provides the kubelet client certificate and key. + They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" scored: true - id: 1.2.6 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: test_items: - flag: "--kubelet-certificate-authority" @@ -419,7 +417,7 @@ groups: - id: 1.2.7 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -427,15 +425,15 @@ groups: op: nothave value: "AlwaysAllow" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" scored: true - id: 1.2.8 text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -443,14 +441,14 @@ groups: op: has value: "Node" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. 
scored: true - id: 1.2.9 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -458,14 +456,14 @@ groups: op: has value: "RBAC" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. scored: true - id: 1.2.10 - text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -474,15 +472,15 @@ groups: value: "EventRateLimit" remediation: | Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" scored: false - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -493,14 +491,15 @@ groups: - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" scored: true - id: 1.2.12 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -508,15 +507,18 @@ groups: op: has value: "AlwaysPullImages" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... 
+ Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." scored: false - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -537,7 +539,7 @@ groups: - id: 1.2.14 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount'" tests: bin_op: or test_items: @@ -548,15 +550,16 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" scored: true - id: 1.2.15 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -567,14 +570,15 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." scored: true - id: 1.2.16 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -582,16 +586,16 @@ groups: op: has value: "NodeRestriction" remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. 
- Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." scored: true - id: 1.2.17 text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" tests: bin_op: or test_items: @@ -602,14 +606,15 @@ groups: - flag: "--secure-port" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. + By default, K3s sets the secure port to 6444. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "secure-port=" scored: true - id: 1.2.18 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -617,29 +622,28 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "profiling=true" scored: true - id: 1.2.19 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path'" tests: test_items: - flag: "--audit-log-path" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false - id: 1.2.20 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage'" tests: test_items: - flag: "--audit-log-maxage" @@ -647,16 +651,15 @@ groups: op: gte value: 30 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false - id: 1.2.21 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup'" tests: test_items: - flag: "--audit-log-maxbackup" @@ -664,16 +667,15 @@ groups: op: gte value: 10 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. 
For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false - id: 1.2.22 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize'" tests: test_items: - flag: "--audit-log-maxsize" @@ -681,27 +683,33 @@ groups: op: gte value: 100 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false - id: 1.2.23 - text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'request-timeout'" tests: + bin_op: or test_items: - flag: "--request-timeout" + set: false + - flag: "--request-timeout" remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - scored: true + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false - id: 1.2.24 text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -712,30 +720,36 @@ groups: op: eq value: true remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so that the default takes effect. 
scored: true - id: 1.2.25 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file'" tests: test_items: - flag: "--service-account-key-file" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. For example, - --service-account-key-file= + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" scored: true - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi tests: bin_op: and test_items: @@ -744,16 +758,17 @@ groups: - flag: "--etcd-keyfile" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" scored: true - id: 1.2.27 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" tests: bin_op: and test_items: @@ -762,57 +777,61 @@ groups: - flag: "--tls-private-key-file" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" scored: true - id: 1.2.28 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" tests: test_items: - flag: "--client-ca-file" remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "client-ca-file=" scored: true - id: 1.2.29 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" tests: test_items: - flag: "--etcd-cafile" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= + By default, K3s automatically provides the etcd certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-cafile=" scored: true - id: 1.2.30 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" tests: test_items: - flag: "--encryption-provider-config" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= + K3s can be configured to use encryption providers to encrypt secrets at rest. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json. 
scored: false - id: 1.2.31 - text: "Ensure that encryption providers are appropriately configured (Automated)" + text: "Ensure that encryption providers are appropriately configured (Manual)" audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi tests: test_items: - flag: "provider" @@ -820,13 +839,16 @@ groups: op: valid_elements value: "aescbc,kms,secretbox" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. + K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json scored: false - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" tests: test_items: - flag: "--tls-cipher-suites" @@ -834,36 +856,32 @@ groups: op: valid_elements value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. 
- --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. + If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true - id: 1.3 text: "Controller Manager" checks: - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" tests: test_items: - flag: "--terminated-pod-gc-threshold" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" scored: false - id: 1.3.2 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -871,14 +889,15 @@ groups: op: eq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "profiling=true" scored: true - id: 1.3.3 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" tests: test_items: - flag: "--use-service-account-credentials" @@ -886,40 +905,43 @@ groups: op: noteq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. - --use-service-account-credentials=true + By default, K3s sets the --use-service-account-credentials argument to true. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "use-service-account-credentials=false" scored: true - id: 1.3.4 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" tests: test_items: - flag: "--service-account-private-key-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= + By default, K3s automatically provides the service account private key file. + It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "service-account-private-key-file=" scored: true - id: 1.3.5 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" tests: test_items: - flag: "--root-ca-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. - --root-ca-file= + By default, K3s automatically provides the root CA file. + It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "root-ca-file=" scored: true - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" - type: "skip" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" tests: bin_op: or test_items: @@ -931,14 +953,16 @@ groups: - flag: "--feature-gates" set: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-controller-manager-arg: + - "feature-gate=RotateKubeletServerCertificate" scored: true - id: 1.3.7 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address'" tests: bin_op: or test_items: @@ -950,8 +974,10 @@ groups: - flag: "--bind-address" set: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "bind-address=" scored: true - id: 1.4 @@ -959,7 +985,7 @@ groups: checks: - id: 1.4.1 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1" tests: test_items: - flag: "--profiling" @@ -968,14 +994,15 @@ groups: value: false set: true remediation: | - Edit the Scheduler pod specification file $schedulerconf file - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "profiling=true" scored: true - id: 1.4.2 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" tests: bin_op: or test_items: @@ -987,6 +1014,8 @@ groups: - flag: "--bind-address" set: false remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-scheduler-arg: + - "bind-address=" scored: true diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml index 82ddff0fb..2b6c0dfa8 100644 --- a/cfg/k3s-cis-1.24/node.yaml +++ b/cfg/k3s-cis-1.24/node.yaml @@ -19,8 +19,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chmod 600 $kubeletsvc + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.2 @@ -31,14 +31,13 @@ groups: test_items: - flag: root:root remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" - audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' tests: bin_op: or test_items: @@ -51,11 +50,11 @@ groups: Run the below command (based on the file location on your system) on the each worker node. For example, chmod 600 $proxykubeconfig - scored: false + scored: true - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' ' + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)" + audit: 'stat -c %U:%G $proxykubeconfig' tests: bin_op: or test_items: @@ -63,18 +62,17 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig - scored: false + scored: true - id: 4.1.5 text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" - audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig ' + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' tests: test_items: - - flag: "600" + - flag: "permissions" compare: - op: eq + op: bitmask value: "600" - set: true remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, @@ -83,7 +81,7 @@ groups: - id: 4.1.6 text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig' + audit: 'stat -c %U:%G $kubeletkubeconfig' tests: test_items: - flag: "root:root" @@ -98,8 +96,8 @@ groups: scored: true - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt" + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a $kubeletcafile" tests: test_items: - flag: "permissions" @@ -109,22 +107,25 @@ groups: set: true remediation: | Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false + --client-ca-file chmod 600 $kubeletcafile + scored: true - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt" + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $kubeletcafile" tests: test_items: - flag: root:root + compare: + op: eq + value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false + chown root:root $kubeletcafile + scored: true - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' type: "skip" tests: @@ -134,20 +135,20 @@ groups: op: bitmask value: "600" remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' type: "skip" tests: test_items: - flag: root:root remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. 
scored: true - id: 4.2 @@ -155,7 +156,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -165,20 +166,20 @@ groups: value: false set: true remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' tests: test_items: - flag: --authorization-mode @@ -188,39 +189,33 @@ groups: value: AlwaysAllow set: true remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. 
For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file path: '{.authentication.x509.clientCAFile}' set: true remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt scored: true - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: bin_op: or test_items: @@ -233,19 +228,20 @@ groups: path: '{.readOnlyPort}' set: false remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service - scored: false + systemctl restart k3s.service + scored: true - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -258,21 +254,17 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. 
- If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" - type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --protect-kernel-defaults @@ -282,20 +274,16 @@ groups: value: true set: true remediation: | - If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + protect-kernel-defaults: true + If using the command line, run K3s with --protect-kernel-defaults=true. + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.7 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" - type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --make-iptables-util-chains @@ -309,39 +297,31 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. 
- audit: "/bin/ps -fC $kubeletbin " + text: "Ensure that the --hostname-override argument is not set (Automated)" type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --hostname-override set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + Not Applicable. + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true - id: 4.2.9 text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/cat $kubeletconf" tests: test_items: @@ -351,18 +331,18 @@ groups: op: eq value: 0 remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s sets the event-qps to 0. Should you wish to change this, + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --tls-cert-file @@ -370,23 +350,19 @@ groups: - flag: --tls-private-key-file path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + By default, K3s automatically provides the TLS certificate and private key for the Kubelet. + They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. 
+ kubelet-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true - id: 4.2.11 text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: - flag: --rotate-certificates @@ -399,21 +375,16 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter. + If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.12 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" - audit_config: "/bin/cat $kubeletconf" + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: bin_op: or test_items: @@ -426,17 +397,17 @@ groups: path: '{.featureGates.RotateKubeletServerCertificate}' set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter. + If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate". + Based on your system, restart the k3s service. 
For example, + systemctl restart k3s.service + scored: true - id: 4.2.13 text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/cat $kubeletconf" tests: test_items: @@ -446,14 +417,11 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. - --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=" + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false diff --git a/cfg/k3s-cis-1.24/policies.yaml b/cfg/k3s-cis-1.24/policies.yaml index 33890fd57..4a4f9887b 100644 --- a/cfg/k3s-cis-1.24/policies.yaml +++ b/cfg/k3s-cis-1.24/policies.yaml @@ -152,8 +152,8 @@ groups: text: "Minimize the admission of containers with capabilities assigned (Manual)" type: "manual" remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider adding + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities. 
scored: false diff --git a/cfg/k3s-cis-1.7/config.yaml b/cfg/k3s-cis-1.7/config.yaml index e9574b035..ed8124f08 100644 --- a/cfg/k3s-cis-1.7/config.yaml +++ b/cfg/k3s-cis-1.7/config.yaml @@ -23,32 +23,42 @@ master: scheduler: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig controllermanager: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig etcd: bins: - containerd - datadirs: - - /var/lib/rancher/k3s/server/db/etcd - node: - components: - - kubelet - - proxy - - kubelet: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig - defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt - - proxy: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - - policies: - components: - - policies + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.7/controlplane.yaml b/cfg/k3s-cis-1.7/controlplane.yaml index fa63febda..b23a04a15 100644 --- a/cfg/k3s-cis-1.7/controlplane.yaml +++ b/cfg/k3s-cis-1.7/controlplane.yaml @@ -35,7 +35,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" tests: test_items: - flag: "--audit-policy-file" diff --git a/cfg/k3s-cis-1.7/etcd.yaml b/cfg/k3s-cis-1.7/etcd.yaml index d29818148..004812013 100644 --- a/cfg/k3s-cis-1.7/etcd.yaml +++ b/cfg/k3s-cis-1.7/etcd.yaml @@ -10,128 +10,135 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files. 
+ scored: false - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.client-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--auto-tls" - set: false - - flag: "--auto-tls" + - path: "{.client-transport-security.auto-tls}" compare: op: eq value: false + - path: "{.client-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true + client-transport-security: + auto-tls: false + scored: false - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files. 
+ scored: false - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.peer-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --peer-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable peer client certificate authentication. + scored: false - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--peer-auto-tls" - set: false - - flag: "--peer-auto-tls" + - path: "{.peer-transport-security.auto-tls}" compare: op: eq value: false - set: true + - path: "{.peer-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true + peer-transport-security: + auto-tls: false + scored: false - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" - audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: test_items: - - flag: "trusted-ca-file" - set: true + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt" remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates a unique certificate authority for etcd. + This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use a shared certificate authority. scored: false diff --git a/cfg/k3s-cis-1.7/master.yaml b/cfg/k3s-cis-1.7/master.yaml index 109b8d84e..9e2ef0880 100644 --- a/cfg/k3s-cis-1.7/master.yaml +++ b/cfg/k3s-cis-1.7/master.yaml @@ -19,10 +19,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 600 $apiserverconf Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. 
scored: true - id: 1.1.2 @@ -33,9 +31,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. scored: true - id: 1.1.3 @@ -49,9 +46,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.4 @@ -62,9 +58,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.5 @@ -78,9 +73,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.6 @@ -91,9 +85,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.7 @@ -108,10 +101,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.8 @@ -123,18 +114,13 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $etcdconf Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.9 - text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" - type: "skip" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a use_multiple_values: true tests: test_items: @@ -143,17 +129,15 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 - Not Applicable. + By default, K3s sets the CNI file permissions to 644. 
+ Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. + If you modify your CNI configuration, ensure that the permissions are set to 600. + For example, chmod 600 /var/lib/cni/networks/ scored: false - id: 1.1.10 - text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" - type: skip - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G use_multiple_values: true tests: test_items: @@ -161,25 +145,28 @@ groups: remediation: | Run the below command (based on the file location on your system) on the control plane node. For example, - chown root:root - Not Applicable. - scored: false + chown root:root /var/lib/cni/networks/ + scored: true - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: "stat -c %a $etcddatadir" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi tests: test_items: - - flag: "700" + - flag: "permissions" compare: - op: eq + op: bitmask value: "700" - set: true remediation: | On the etcd server node, get the etcd data directory, passed as an argument --data-dir, from the command 'ps -ef | grep etcd'. Run the below command (based on the etcd data directory found above). For example, - chmod 700 /var/lib/etcd + chmod 700 $etcddatadir scored: true - id: 1.1.12 @@ -190,17 +177,14 @@ groups: test_items: - flag: "etcd:etcd" remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. scored: true - id: 1.1.13 text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'" - type: "skip" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -224,12 +208,12 @@ groups: set: true remediation: | Run the below command (based on the file location on your system) on the control plane node. 
- For example, chown root:root /etc/kubernetes/admin.conf + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig scored: true - id: 1.1.15 text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -244,7 +228,7 @@ groups: - id: 1.1.16 text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -256,7 +240,7 @@ groups: - id: 1.1.17 text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -271,7 +255,7 @@ groups: - id: 1.1.18 text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" - audit: "stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig" + audit: "stat -c %U:%G $controllermanagerkubeconfig" tests: test_items: - flag: "root:root" @@ -300,7 +284,7 @@ groups: - id: 1.1.20 text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" use_multiple_values: true tests: test_items: @@ -309,14 +293,14 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. For example, - chmod -R 600 /etc/kubernetes/pki/*.crt + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt scored: false - id: 1.1.21 - text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" use_multiple_values: true tests: test_items: @@ -325,17 +309,17 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. 
For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true - id: 1.2 text: "API Server" checks: - id: 1.2.1 - text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" tests: test_items: - flag: "--anonymous-auth" @@ -343,27 +327,29 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false + By default, K3s sets the --anonymous-auth argument to false. If it is set to true, + edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "anonymous-auth=true" + scored: true - id: 1.2.2 text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--token-auth-file" set: false remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" scored: true - id: 1.2.3 text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -375,47 +361,46 @@ groups: - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" scored: true - id: 1.2.4 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: bin_op: and test_items: - flag: "--kubelet-client-certificate" - flag: "--kubelet-client-key" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= + By default, K3s automatically provides the kubelet client certificate and key. 
+ They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" scored: true - id: 1.2.5 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - type: "skip" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: test_items: - flag: "--kubelet-certificate-authority" remediation: | - Follow the Kubernetes documentation and setup the TLS connection between - the apiserver and kubelets. Then, edit the API server pod specification file - $apiserverconf on the control plane node and set the - --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. - --kubelet-certificate-authority= - Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "kubelet-certificate-authority=" scored: true - id: 1.2.6 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -423,15 +408,15 @@ groups: op: nothave value: "AlwaysAllow" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" scored: true - id: 1.2.7 text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -439,14 +424,14 @@ groups: op: has value: "Node" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. 
scored: true - id: 1.2.8 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -454,14 +439,14 @@ groups: op: has value: "RBAC" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. scored: true - id: 1.2.9 text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -470,15 +455,15 @@ groups: value: "EventRateLimit" remediation: | Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" scored: false - id: 1.2.10 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -489,9 +474,10 @@ groups: - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" scored: true - id: 1.2.11 @@ -504,10 +490,13 @@ groups: op: has value: "AlwaysPullImages" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." 
scored: false - id: 1.2.12 @@ -526,16 +515,13 @@ groups: op: has value: "PodSecurityPolicy" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - SecurityContextDeny, unless PodSecurityPolicy is already in place. - --enable-admission-plugins=...,SecurityContextDeny,... - Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + Not Applicable. + Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail. scored: false - id: 1.2.13 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -546,15 +532,16 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" scored: true - id: 1.2.14 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -565,14 +552,15 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." scored: true - id: 1.2.15 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -580,16 +568,16 @@ groups: op: has value: "NodeRestriction" remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. 
+ kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." scored: true - id: 1.2.16 text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" tests: bin_op: or test_items: @@ -600,14 +588,15 @@ groups: - flag: "--secure-port" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. + By default, K3s sets the secure port to 6444. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "secure-port=" scored: true - id: 1.2.17 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -615,30 +604,28 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "profiling=true" scored: true - id: 1.2.18 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--audit-log-path" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - Permissive. - scored: true + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false - id: 1.2.19 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--audit-log-maxage" @@ -646,17 +633,15 @@ groups: op: gte value: 30 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - Permissive. 
- scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false - id: 1.2.20 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--audit-log-maxbackup" @@ -664,17 +649,15 @@ groups: op: gte value: 10 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - Permissive. - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false - id: 1.2.21 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--audit-log-maxsize" @@ -682,29 +665,30 @@ groups: op: gte value: 100 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - Permissive. - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false - id: 1.2.22 text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--request-timeout" remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - Permissive. + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. 
For example, + kube-apiserver-arg: + - "request-timeout=300s" scored: false - id: 1.2.23 text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -715,30 +699,36 @@ groups: op: eq value: true remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so that the default takes effect. scored: true - id: 1.2.24 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--service-account-key-file" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. For example, - --service-account-key-file= + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" scored: true - id: 1.2.25 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi tests: bin_op: and test_items: @@ -747,16 +737,17 @@ groups: - flag: "--etcd-keyfile" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" scored: true - id: 1.2.26 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" tests: bin_op: and test_items: @@ -765,60 +756,61 @@ groups: - flag: "--tls-private-key-file" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" scored: true - id: 1.2.27 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" tests: test_items: - flag: "--client-ca-file" remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "client-ca-file=" scored: true - id: 1.2.28 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" tests: test_items: - flag: "--etcd-cafile" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= + By default, K3s automatically provides the etcd certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "etcd-cafile=" scored: true - id: 1.2.29 text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" - type: "skip" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" tests: test_items: - flag: "--encryption-provider-config" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= - Permissive - Enabling encryption changes how data can be recovered as data is encrypted. + K3s can be configured to use encryption providers to encrypt secrets at rest. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json. scored: false - id: 1.2.30 text: "Ensure that encryption providers are appropriately configured (Manual)" - type: "skip" audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi tests: test_items: - flag: "provider" @@ -826,14 +818,16 @@ groups: op: valid_elements value: "aescbc,kms,secretbox" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. - Permissive - Enabling encryption changes how data can be recovered as data is encrypted. + K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. 
+ If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json scored: false - - id: 1.2.32 - text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" tests: test_items: - flag: "--tls-cipher-suites" @@ -841,36 +835,32 @@ groups: op: valid_elements value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. 
+ If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true - id: 1.3 text: "Controller Manager" checks: - id: 1.3.1 text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" tests: test_items: - flag: "--terminated-pod-gc-threshold" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" scored: false - id: 1.3.2 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -878,14 +868,15 @@ groups: op: eq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "profiling=true" scored: true - id: 1.3.3 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" tests: test_items: - flag: "--use-service-account-credentials" @@ -893,40 +884,43 @@ groups: op: noteq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. - --use-service-account-credentials=true + By default, K3s sets the --use-service-account-credentials argument to true. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "use-service-account-credentials=false" scored: true - id: 1.3.4 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" tests: test_items: - flag: "--service-account-private-key-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= + By default, K3s automatically provides the service account private key file. + It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "service-account-private-key-file=" scored: true - id: 1.3.5 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" tests: test_items: - flag: "--root-ca-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. - --root-ca-file= + By default, K3s automatically provides the root CA file. + It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "root-ca-file=" scored: true - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - type: "skip" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" tests: bin_op: or test_items: @@ -938,10 +932,11 @@ groups: - flag: "--feature-gates" set: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. - --feature-gates=RotateKubeletServerCertificate=true - Not Applicable. + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-controller-manager-arg: + - "feature-gate=RotateKubeletServerCertificate" scored: true - id: 1.3.7 @@ -958,8 +953,10 @@ groups: - flag: "--bind-address" set: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and ensure the correct value for the --bind-address parameter + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "bind-address=" scored: true - id: 1.4 @@ -967,7 +964,7 @@ groups: checks: - id: 1.4.1 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1" tests: test_items: - flag: "--profiling" @@ -976,14 +973,15 @@ groups: value: false set: true remediation: | - Edit the Scheduler pod specification file $schedulerconf file - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "profiling=true" scored: true - id: 1.4.2 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" tests: bin_op: or test_items: @@ -995,6 +993,8 @@ groups: - flag: "--bind-address" set: false remediation: | - Edit the Scheduler pod specification file $schedulerconf - on the control plane node and ensure the correct value for the --bind-address parameter + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "bind-address=" scored: true diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml index 780bb4d8b..bb23b5e14 100644 --- a/cfg/k3s-cis-1.7/node.yaml +++ b/cfg/k3s-cis-1.7/node.yaml @@ -19,9 +19,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, chmod 600 $kubeletsvc - Not Applicable - All configuration is passed in as arguments at container run time. + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.2 @@ -32,16 +31,15 @@ groups: test_items: - flag: root:root remediation: | - Run the below command (based on the file location on your system) on the each worker node. - For example, - chown root:root $kubeletsvc + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. Not Applicable. All configuration is passed in as arguments at container run time. scored: true - id: 4.1.3 - text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" - audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' tests: bin_op: or test_items: @@ -53,11 +51,11 @@ groups: Run the below command (based on the file location on your system) on the each worker node. 
For example, chmod 600 $proxykubeconfig - scored: false + scored: true - id: 4.1.4 - text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" - audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' ' + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' tests: bin_op: or test_items: @@ -65,7 +63,7 @@ groups: remediation: | Run the below command (based on the file location on your system) on the each worker node. For example, chown root:root $proxykubeconfig - scored: false + scored: true - id: 4.1.5 text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" @@ -84,7 +82,7 @@ groups: - id: 4.1.6 text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig' + audit: 'stat -c %U:%G $kubeletkubeconfig' tests: test_items: - flag: root:root @@ -95,8 +93,8 @@ groups: scored: true - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt" + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a $kubeletcafile" tests: test_items: - flag: "permissions" @@ -105,19 +103,22 @@ groups: value: "600" remediation: | Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false + --client-ca-file chmod 600 $kubeletcafile + scored: true - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt" + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $kubeletcafile" tests: test_items: - flag: root:root + compare: + op: eq + value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false + chown root:root $kubeletcafile + scored: true - id: 4.1.9 text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" @@ -130,8 +131,8 @@ groups: op: bitmask value: "600" remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.10 @@ -142,10 +143,8 @@ groups: test_items: - flag: root:root remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf Not Applicable. - All configuration is passed in as arguments at container run time. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. 
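      A note on the file-permission checks above: the $proxykubeconfig, $kubeletkubeconfig and $kubeletcafile
      variables resolve to fixed paths on a default K3s install. The paths below are assumed from the defaults
      used elsewhere in this benchmark (the CA file location in particular can differ between server and agent
      nodes), so treat this as an illustrative spot-check rather than a definitive audit:
        # Illustrative manual verification on a K3s node (paths are assumed defaults)
        stat -c permissions=%a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
        stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig
        stat -c permissions=%a /var/lib/rancher/k3s/agent/client-ca.crt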
scored: true - id: 4.2 @@ -153,7 +152,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -162,20 +161,20 @@ groups: op: eq value: false remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -185,38 +184,32 @@ groups: op: nothave value: AlwaysAllow remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. 
For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file path: '{.authentication.x509.clientCAFile}' remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt scored: true - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: bin_op: or @@ -230,19 +223,20 @@ groups: path: '{.readOnlyPort}' set: false remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service - scored: false + systemctl restart k3s.service + scored: true - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -255,21 +249,17 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. 
- If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - type: "skip" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --make-iptables-util-chains @@ -282,41 +272,31 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - Permissive. + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.7 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. + text: "Ensure that the --hostname-override argument is not set (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" type: "skip" - audit: "/bin/ps -fC $kubeletbin " tests: test_items: - flag: --hostname-override set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service Not Applicable. - scored: false + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true - id: 4.2.8 text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -330,19 +310,18 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. 
- If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s sets the event-qps to 0. Should you wish to change this, + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.9 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - type: "skip" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --tls-cert-file @@ -350,23 +329,18 @@ groups: - flag: --tls-private-key-file path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. - scored: false + By default, K3s automatically provides the TLS certificate and private key for the Kubelet. + They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key + If for some reason you need to provide your own certificate and key, you can set the + the below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kubelet-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true - id: 4.2.10 - text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" - audit: "/bin/ps -fC $kubeletbin" + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -380,20 +354,16 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or - remove it altogether to use the default value. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS - variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + By default, K3s does not set the --rotate-certificates argument. 
If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter. + If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true - id: 4.2.11 - text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" - audit: "/bin/ps -fC $kubeletbin" + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/cat $kubeletconf" tests: bin_op: or @@ -407,18 +377,17 @@ groups: path: '{.featureGates.RotateKubeletServerCertificate}' set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. - --feature-gates=RotateKubeletServerCertificate=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - Not Applicable. - scored: false + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter. + If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true - id: 4.2.12 text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -428,21 +397,18 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" or to a subset of these values. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the --tls-cipher-suites parameter as follows, or to a subset of these values. 
- --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=" + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.13 text: "Ensure that a limit is set on pod PIDs (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -450,5 +416,7 @@ groups: path: '{.podPidsLimit}' remediation: | Decide on an appropriate level for this parameter and set it, - either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to + kubelet-arg: + - "pod-max-pids=" scored: false diff --git a/cfg/k3s-cis-1.7/policies.yaml b/cfg/k3s-cis-1.7/policies.yaml index 3b1d1ef17..f4d4a24e8 100644 --- a/cfg/k3s-cis-1.7/policies.yaml +++ b/cfg/k3s-cis-1.7/policies.yaml @@ -43,23 +43,15 @@ groups: - id: 5.1.5 text: "Ensure that default service accounts are not actively used. (Manual)" - type: "skip" - audit: check_for_default_sa.sh - tests: - test_items: - - flag: "true" - compare: - op: eq - value: "true" - set: true + type: "manual" remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. Modify the configuration of each default service account to include this value automountServiceAccountToken: false - Permissive - Kubernetes provides default service accounts to be used. scored: false + - id: 5.1.6 text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" type: "manual" @@ -138,29 +130,23 @@ groups: - id: 5.2.3 text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" - type: "skip" remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. - Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. scored: false - id: 5.2.4 text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" - type: "skip" remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. - Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. scored: false - id: 5.2.5 text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" - type: "skip" remediation: | Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. - Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. 
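      With the type: skip removed from 5.2.3-5.2.5, one way (not the only way) to restrict admission of
      hostPID, hostIPC and hostNetwork containers is the built-in Pod Security Admission baseline profile,
      which rejects pods that request host namespaces. The namespace name below is a placeholder; apply the
      label to each namespace that carries user workloads:
        kubectl label --overwrite namespace my-app \
          pod-security.kubernetes.io/enforce=baseline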
scored: false - id: 5.2.6 @@ -199,8 +185,8 @@ groups: text: "Minimize the admission of containers with capabilities assigned (Manual)" type: "manual" remediation: | - Review the use of capabilites in applications running on your cluster. Where a namespace - contains applicaions which do not require any Linux capabities to operate consider adding + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities. scored: false @@ -242,10 +228,8 @@ groups: - id: 5.3.2 text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" - type: "skip" remediation: | Follow the documentation and create NetworkPolicy objects as you need them. - Permissive - Enabling Network Policies can prevent certain applications from communicating with each other. scored: false - id: 5.4 @@ -310,9 +294,7 @@ groups: - id: 5.7.4 text: "The default namespace should not be used (Manual)" - type: "skip" remediation: | Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. - Permissive - Kubernetes provides a default namespace. scored: false diff --git a/cfg/k3s-cis-1.8/config.yaml b/cfg/k3s-cis-1.8/config.yaml new file mode 100644 index 000000000..fff6edb7d --- /dev/null +++ b/cfg/k3s-cis-1.8/config.yaml @@ -0,0 +1,54 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - kubelet + - scheduler + - controllermanager + - etcd + - policies + apiserver: + bins: + - containerd + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + scheduler: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + controllermanager: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + etcd: + bins: + - containerd + +etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + kubelet: + bins: + - containerd + confs: + - /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.8/controlplane.yaml b/cfg/k3s-cis-1.8/controlplane.yaml new file mode 100644 index 000000000..d14070656 --- /dev/null +++ b/cfg/k3s-cis-1.8/controlplane.yaml @@ -0,0 +1,62 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. 
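      As a sketch of the OIDC alternative recommended in 3.1.1-3.1.2, the embedded apiserver can take the
      usual OIDC flags through the same kube-apiserver-arg mechanism used throughout this benchmark, via
      /etc/rancher/k3s/config.yaml. The issuer URL, client ID and claim names below are placeholders for a
      real identity provider, and a complete setup also needs matching RBAC bindings:
        kube-apiserver-arg:
          - "oidc-issuer-url=https://idp.example.com"
          - "oidc-client-id=kubernetes"
          - "oidc-username-claim=email"
          - "oidc-groups-claim=groups"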
+ scored: false + + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/k3s-cis-1.8/etcd.yaml b/cfg/k3s-cis-1.8/etcd.yaml new file mode 100644 index 000000000..4a39779cb --- /dev/null +++ b/cfg/k3s-cis-1.8/etcd.yaml @@ -0,0 +1,144 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: and + test_items: + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files. + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.client-transport-security.client-cert-auth}" + compare: + op: eq + value: true + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: or + test_items: + - path: "{.client-transport-security.auto-tls}" + compare: + op: eq + value: false + - path: "{.client-transport-security.auto-tls}" + set: false + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. 
+ When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + client-transport-security: + auto-tls: false + scored: false + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: and + test_items: + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files. + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.peer-transport-security.client-cert-auth}" + compare: + op: eq + value: true + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --peer-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable peer client certificate authentication. + scored: false + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: or + test_items: + - path: "{.peer-transport-security.auto-tls}" + compare: + op: eq + value: false + - path: "{.peer-transport-security.auto-tls}" + set: false + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + peer-transport-security: + auto-tls: false + scored: false + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt" + remediation: | + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates a unique certificate authority for etcd. + This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use a shared certificate authority. 
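      Taken together, checks 2.1-2.7 expect the K3s-generated etcd configuration referenced by $etcdconf to
      contain values along the following lines (an abridged sketch assembled from the expected values above;
      the generated file holds additional settings, and none of this applies when K3s uses sqlite or an
      external datastore):
        client-transport-security:
          cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt
          key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key
          client-cert-auth: true
        peer-transport-security:
          cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt
          key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key
          client-cert-auth: true
          trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt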
+ scored: false diff --git a/cfg/k3s-cis-1.8/master.yaml b/cfg/k3s-cis-1.8/master.yaml new file mode 100644 index 000000000..eb4799a17 --- /dev/null +++ b/cfg/k3s-cis-1.8/master.yaml @@ -0,0 +1,985 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. 
+ scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + By default, K3s sets the CNI file permissions to 600. + Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. + If you modify your CNI configuration, ensure that the permissions are set to 600. + For example, chmod 600 /var/lib/cni/networks/ + scored: true + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/cni/networks/ + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. 
+ scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $controllermanagerkubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "anonymous-auth=true" + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + By default, K3s automatically provides the kubelet client certificate and key. 
+ They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "kubelet-certificate-authority=" + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." 
+ - "admission-control-config-file=" + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Not Applicable. + Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + Follow the documentation and create ServiceAccount objects as per your environment. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." + scored: true + + - id: 1.2.16 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "profiling=true" + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. 
For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false + + - id: 1.2.21 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false + + - id: 1.2.22 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" + scored: true + + - id: 1.2.24 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" + scored: true + + - id: 1.2.25 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 1.2.26 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "client-ca-file=" + scored: true + + - id: 1.2.27 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + By default, K3s automatically provides the etcd certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-cafile=" + scored: true + + - id: 1.2.28 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + K3s can be configured to use encryption providers to encrypt secrets at rest. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json. + scored: false + + - id: 1.2.29 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. 
+ If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json + scored: false + + - id: 1.2.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. + If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "profiling=true" + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + By default, K3s sets the --use-service-account-credentials argument to true. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "use-service-account-credentials=false" + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + By default, K3s automatically provides the service account private key file. + It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "service-account-private-key-file=" + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + By default, K3s automatically provides the root CA file. + It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "root-ca-file=" + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-controller-manager-arg: + - "feature-gate=RotateKubeletServerCertificate" + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "bind-address=" + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "profiling=true" + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "bind-address=" + scored: true diff --git a/cfg/k3s-cis-1.8/node.yaml b/cfg/k3s-cis-1.8/node.yaml new file mode 100644 index 000000000..eef9414de --- /dev/null +++ b/cfg/k3s-cis-1.8/node.yaml @@ -0,0 +1,422 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. + Not Applicable. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $proxykubeconfig + scored: true + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. 
+ For example, chown root:root $proxykubeconfig + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: 'stat -c %U:%G $kubeletkubeconfig' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a $kubeletcafile" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 $kubeletcafile + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $kubeletcafile" + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root $kubeletcafile + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. 
+ kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. 
For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Not Applicable. + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + By default, K3s sets the event-qps to 0. Should you wish to change this, + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. 
For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + By default, K3s automatically provides the TLS certificate and private key for the Kubelet. + They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kubelet-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter. + If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter. + If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate". + Based on your system, restart the k3s service. 
For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + or to a subset of these values. + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=" + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to + kubelet-arg: + - "pod-max-pids=" + scored: false diff --git a/cfg/k3s-cis-1.8/policies.yaml b/cfg/k3s-cis-1.8/policies.yaml new file mode 100644 index 000000000..29c65ef42 --- /dev/null +++ b/cfg/k3s-cis-1.8/policies.yaml @@ -0,0 +1,300 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. 
(Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. 
+ scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: true + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. 
+ scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rke-cis-1.23/policies.yaml b/cfg/rke-cis-1.23/policies.yaml index 611d2c82c..5e4c6d640 100644 --- a/cfg/rke-cis-1.23/policies.yaml +++ b/cfg/rke-cis-1.23/policies.yaml @@ -42,16 +42,8 @@ groups: scored: false - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Automated)" - type: "skip" - audit: check_for_default_sa.sh - tests: - test_items: - - flag: "true" - compare: - op: eq - value: "true" - set: true + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. diff --git a/cfg/rke-cis-1.24/policies.yaml b/cfg/rke-cis-1.24/policies.yaml index 362771a65..2b8dba3e1 100644 --- a/cfg/rke-cis-1.24/policies.yaml +++ b/cfg/rke-cis-1.24/policies.yaml @@ -44,14 +44,6 @@ groups: - id: 5.1.5 text: "Ensure that default service accounts are not actively used. (Manual)" type: "manual" - audit: check_for_default_sa.sh - tests: - test_items: - - flag: "true" - compare: - op: eq - value: "true" - set: true remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. 
diff --git a/cfg/rke-cis-1.7/policies.yaml b/cfg/rke-cis-1.7/policies.yaml index a543875c3..207dd7808 100644 --- a/cfg/rke-cis-1.7/policies.yaml +++ b/cfg/rke-cis-1.7/policies.yaml @@ -43,21 +43,12 @@ groups: - id: 5.1.5 text: "Ensure that default service accounts are not actively used. (Manual)" - type: "skip" - audit: check_for_default_sa.sh - tests: - test_items: - - flag: "true" - compare: - op: eq - value: "true" - set: true + type: "manual" remediation: | Create explicit service accounts wherever a Kubernetes workload requires specific access to the Kubernetes API server. Modify the configuration of each default service account to include this value automountServiceAccountToken: false - Permissive - Kubernetes provides default service accounts to be used. scored: false - id: 5.1.6 From 2751f870348e067306caed2017513ea42a03a961 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Thu, 26 Sep 2024 00:45:48 -0400 Subject: [PATCH 237/262] Fix audit and remediation for CIS-1.9 master 1.1.13/1.1.14 (#1649) * Fix audit and remediation for CIS-1.9 master 1.1.13/1.1.14 * Fix loop syntax for file paths --------- Co-authored-by: afdesk --- cfg/cis-1.9/master.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml index ad1423eb5..50edab16c 100644 --- a/cfg/cis-1.9/master.yaml +++ b/cfg/cis-1.9/master.yaml @@ -189,7 +189,7 @@ groups: - id: 1.1.13 text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)" audit: | - for adminconf in /etc/kubernetes/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c \"permissions=%a %n\" $adminconf; fi; done + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done use_multiple_values: true tests: test_items: @@ -207,7 +207,7 @@ groups: - id: 1.1.14 text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)" audit: | - for adminconf in /tmp/{admin.conf,super-admin.conf}; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done use_multiple_values: true tests: test_items: @@ -219,7 +219,7 @@ groups: Run the below command (based on the file location on your system) on the control plane node. For example, chown root:root /etc/kubernetes/admin.conf On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. - For example, chmod 600 /etc/kubernetes/super-admin.conf + For example, chown root:root /etc/kubernetes/super-admin.conf scored: true - id: 1.1.15 From f6877e3c17c31f9163cbc2603e99eb4b4bae9977 Mon Sep 17 00:00:00 2001 From: Wolfgang Reichert <76220870+w-reichert@users.noreply.github.com> Date: Sat, 28 Sep 2024 09:36:44 +0200 Subject: [PATCH 238/262] Fix issue 1595: failed to output to ASFF (#1691) A breaking change was introduced in aws-sdk-go-v2. See https://github.com/aws/aws-sdk-go-v2/issues/2370#issuecomment-1953308268. Mixing aws-sdk-go-v2 packages from versions before and after the breaking change causes kube-bench to fail. This issue occurs when it attempts to access AWS Security Hub. 
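For illustration only, a minimal sketch of the kind of adaptation this involves — assuming the post-change SDK, where numeric members such as Confidence and the batch-import counters become pointers (the actual changes are in the diff below):

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
)

func main() {
	// Confidence is now *int32, so the pointer returned by aws.Int32 is
	// assigned directly instead of being dereferenced first.
	finding := types.AwsSecurityFinding{
		Confidence: aws.Int32(100),
	}

	// Count members returned by the SDK are pointers as well, so they are
	// dereferenced before comparison or accumulation.
	fmt.Println(*finding.Confidence)
}
```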
Addressed issue: https://github.com/aquasecurity/kube-bench/issues/1595 Supersedes bot PR: https://github.com/aquasecurity/kube-bench/pull/1689 Besides upgrading to latest SDK version, some variable types need to be adapted. --- check/controls.go | 2 +- check/controls_test.go | 2 +- go.mod | 29 +++++++++++++++-------------- go.sum | 29 +++++++++++++++++++++++++++++ internal/findings/publisher.go | 8 ++++---- 5 files changed, 50 insertions(+), 20 deletions(-) diff --git a/check/controls.go b/check/controls.go index 15db8312b..171835058 100644 --- a/check/controls.go +++ b/check/controls.go @@ -252,7 +252,7 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) { f := types.AwsSecurityFinding{ AwsAccountId: aws.String(account), - Confidence: *aws.Int32(100), + Confidence: aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), Id: id, CreatedAt: aws.String(tf), diff --git a/check/controls_test.go b/check/controls_test.go index c6d7f1c59..c2f6ab36e 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -407,7 +407,7 @@ func TestControls_ASFF(t *testing.T) { want: []types.AwsSecurityFinding{ { AwsAccountId: aws.String("foo account"), - Confidence: *aws.Int32(100), + Confidence: aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", fmt.Sprintf(ARN, "somewhere"), "1", "check1id")), Description: aws.String("check1text"), ProductArn: aws.String(fmt.Sprintf(ARN, "somewhere")), diff --git a/go.mod b/go.mod index e0a3c5776..304aa3c62 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,9 @@ module github.com/aquasecurity/kube-bench go 1.22 require ( - github.com/aws/aws-sdk-go-v2 v1.26.0 - github.com/aws/aws-sdk-go-v2/config v1.27.4 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 + github.com/aws/aws-sdk-go-v2 v1.31.0 + github.com/aws/aws-sdk-go-v2/config v1.27.37 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1 github.com/fatih/color v1.16.0 github.com/golang/glog v1.2.0 github.com/magiconair/properties v1.8.7 @@ -22,17 +22,17 @@ require ( ) require ( - github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect - github.com/aws/smithy-go v1.20.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.35 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts 
v1.31.1 // indirect + github.com/aws/smithy-go v1.21.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect @@ -53,6 +53,7 @@ require ( github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect diff --git a/go.sum b/go.sum index 979defc02..95cda5736 100644 --- a/go.sum +++ b/go.sum @@ -1,35 +1,63 @@ github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= +github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= +github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= +github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU= +github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg= github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 
h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1 h1:ue90LN6bFGMtmUbR+mka0kL5Up3EiNMHen7TUW3NCNA= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1/go.mod h1:QFtYEC35t39ftJ6emZgapzdtBjGZsuR4bAd73SiG23I= github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= +github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -104,6 +132,7 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= diff --git a/internal/findings/publisher.go b/internal/findings/publisher.go index 1847a77fe..4f960afdb 100644 --- a/internal/findings/publisher.go +++ b/internal/findings/publisher.go @@ -54,11 +54,11 @@ func (p *Publisher) PublishFinding(finding []types.AwsSecurityFinding) (*Publish errs = errors.Wrap(err, "finding publish failed") } if r != nil { - if r.FailedCount != 0 { - o.FailedCount += r.FailedCount + if *r.FailedCount != 0 { + o.FailedCount += *r.FailedCount } - if r.SuccessCount != 0 { - o.SuccessCount += r.SuccessCount + if *r.SuccessCount != 0 { + o.SuccessCount += *r.SuccessCount } o.FailedFindings = append(o.FailedFindings, r.FailedFindings...) } From b85ec78a84b9d98cfd732d18504d4dfa78c79582 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Sun, 29 Sep 2024 23:54:45 -0400 Subject: [PATCH 239/262] Fix CIS-1.9 policies 5.1.1/5.1.5 typos (#1658) * Fix CIS-1.9 policies 5.1.1 typo * Fix typo CIS-1.9 5.1.5 * Add new lines to CIS-1.9 --- cfg/cis-1.9/controlplane.yaml | 2 ++ cfg/cis-1.9/policies.yaml | 37 +++++++++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/cfg/cis-1.9/controlplane.yaml b/cfg/cis-1.9/controlplane.yaml index a9a5df5b9..4b58b63d4 100644 --- a/cfg/cis-1.9/controlplane.yaml +++ b/cfg/cis-1.9/controlplane.yaml @@ -15,6 +15,7 @@ groups: Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of client certificates. scored: false + - id: 3.1.2 text: "Service account token authentication should not be used for users (Manual)" type: "manual" @@ -22,6 +23,7 @@ groups: Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented in place of service account tokens. scored: false + - id: 3.1.3 text: "Bootstrap token authentication should not be used for users (Manual)" type: "manual" diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml index e99c42c98..0adec2ea7 100644 --- a/cfg/cis-1.9/policies.yaml +++ b/cfg/cis-1.9/policies.yaml @@ -18,7 +18,7 @@ groups: else is_compliant="true" fi; - echo "**role_name: ${role_name} role_binding: ${rolebinding} subject: ${subject} is_compliant: ${is_compliant}" + echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}" done use_multiple_values: true tests: @@ -34,6 +34,7 @@ groups: clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name] Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin. 
scored: true + - id: 5.1.2 text: "Minimize access to secrets (Automated)" audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\"" @@ -46,6 +47,7 @@ groups: remediation: | Where possible, remove get, list and watch access to Secret objects in the cluster. scored: true + - id: 5.1.3 text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" audit: | @@ -92,6 +94,7 @@ groups: Condition: role_is_compliant is false if ["*"] is found in rules. Condition: clusterrole_is_compliant is false if ["*"] is found in rules. scored: true + - id: 5.1.4 text: "Minimize access to create pods (Automated)" audit: | @@ -106,7 +109,7 @@ groups: Where possible, remove create access to pod objects in the cluster. scored: true - id: 5.1.5 - text: "Ensure that default service accounts are not actively used. (Automated)" + text: "Ensure that default service accounts are not actively used (Automated)" audit: | kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1 use_multiple_values: true @@ -123,6 +126,7 @@ groups: Modify the configuration of each default service account to include this value `automountServiceAccountToken: false`. scored: true + - id: 5.1.6 text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" audit: | @@ -155,48 +159,56 @@ groups: - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset - ServiceAccount is automountServiceAccountToken: true notset and Pod is automountServiceAccountToken: false scored: true + - id: 5.1.7 text: "Avoid use of system:masters group (Manual)" type: "manual" remediation: | Remove the system:masters group from all users in the cluster. scored: false + - id: 5.1.8 text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" type: "manual" remediation: | Where possible, remove the impersonate, bind and escalate rights from subjects. scored: false + - id: 5.1.9 text: "Minimize access to create persistent volumes (Manual)" type: "manual" remediation: | Where possible, remove create access to PersistentVolume objects in the cluster. scored: false + - id: 5.1.10 text: "Minimize access to the proxy sub-resource of nodes (Manual)" type: "manual" remediation: | Where possible, remove access to the proxy sub-resource of node objects. scored: false + - id: 5.1.11 text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" type: "manual" remediation: | Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. scored: false + - id: 5.1.12 text: "Minimize access to webhook configuration objects (Manual)" type: "manual" remediation: | Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects scored: false + - id: 5.1.13 text: "Minimize access to the service account token creation (Manual)" type: "manual" remediation: | Where possible, remove access to the token sub-resource of serviceaccount objects. 
scored: false + - id: 5.2 text: "Pod Security Standards" checks: @@ -207,6 +219,7 @@ groups: Ensure that either Pod Security Admission or an external policy control system is in place for every namespace which contains user workloads. scored: false + - id: 5.2.2 text: "Minimize the admission of privileged containers (Manual)" type: "manual" @@ -214,6 +227,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of privileged containers. scored: false + - id: 5.2.3 text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" type: "manual" @@ -221,6 +235,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostPID` containers. scored: false + - id: 5.2.4 text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" type: "manual" @@ -228,6 +243,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostIPC` containers. scored: false + - id: 5.2.5 text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" type: "manual" @@ -235,6 +251,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of `hostNetwork` containers. scored: false + - id: 5.2.6 text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" type: "manual" @@ -242,6 +259,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. scored: false + - id: 5.2.7 text: "Minimize the admission of root containers (Manual)" type: "manual" @@ -249,6 +267,7 @@ groups: Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` or `MustRunAs` with the range of UIDs not including 0, is set. scored: false + - id: 5.2.8 text: "Minimize the admission of containers with the NET_RAW capability (Manual)" type: "manual" @@ -256,6 +275,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with the `NET_RAW` capability. scored: false + - id: 5.2.9 text: "Minimize the admission of containers with added capabilities (Manual)" type: "manual" @@ -263,6 +283,7 @@ groups: Ensure that `allowedCapabilities` is not present in policies for the cluster unless it is set to an empty array. scored: false + - id: 5.2.10 text: "Minimize the admission of containers with capabilities assigned (Manual)" type: "manual" @@ -271,6 +292,7 @@ groups: contains applicaions which do not require any Linux capabities to operate consider adding a PSP which forbids the admission of containers which do not drop all capabilities. scored: false + - id: 5.2.11 text: "Minimize the admission of Windows HostProcess containers (Manual)" type: "manual" @@ -278,6 +300,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. scored: false + - id: 5.2.12 text: "Minimize the admission of HostPath volumes (Manual)" type: "manual" @@ -285,6 +308,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers with `hostPath` volumes. 
scored: false + - id: 5.2.13 text: "Minimize the admission of containers which use HostPorts (Manual)" type: "manual" @@ -292,6 +316,7 @@ groups: Add policies to each namespace in the cluster which has user workloads to restrict the admission of containers which use `hostPort` sections. scored: false + - id: 5.3 text: "Network Policies and CNI" checks: @@ -303,12 +328,14 @@ groups: making use of a different plugin, or finding an alternate mechanism for restricting traffic in the Kubernetes cluster. scored: false + - id: 5.3.2 text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" type: "manual" remediation: | Follow the documentation and create NetworkPolicy objects as you need them. scored: false + - id: 5.4 text: "Secrets Management" checks: @@ -319,6 +346,7 @@ groups: If possible, rewrite application code to read Secrets from mounted secret files, rather than from environment variables. scored: false + - id: 5.4.2 text: "Consider external secret storage (Manual)" type: "manual" @@ -326,6 +354,7 @@ groups: Refer to the Secrets management options offered by your cloud provider or a third-party secrets management solution. scored: false + - id: 5.5 text: "Extensible Admission Control" checks: @@ -335,6 +364,7 @@ groups: remediation: | Follow the Kubernetes documentation and setup image provenance. scored: false + - id: 5.7 text: "General Policies" checks: @@ -345,6 +375,7 @@ groups: Follow the documentation and create namespaces for objects in your deployment as you need them. scored: false + - id: 5.7.2 text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" type: "manual" @@ -355,6 +386,7 @@ groups: seccompProfile: type: RuntimeDefault scored: false + - id: 5.7.3 text: "Apply SecurityContext to your Pods and Containers (Manual)" type: "manual" @@ -363,6 +395,7 @@ groups: suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker Containers. scored: false + - id: 5.7.4 text: "The default namespace should not be used (Manual)" type: "manual" From 4b4c1ce70972b92a9f843b1660045f589757ad79 Mon Sep 17 00:00:00 2001 From: Andy Pitcher Date: Mon, 30 Sep 2024 00:30:59 -0400 Subject: [PATCH 240/262] Modify `1.2.3 Ensure that the DenyServiceExternalIPs is set` in CIS-1.7/1.8 (#1607) * Modify 1.2.3 Ensure that the DenyServiceExternalIPs is set - op changed from `have` to `has` and removed bin_op: or - remediation description changed to only include --enable-admission-plugins * Apply changes for CIS-1.9 --- cfg/cis-1.7/master.yaml | 7 +++---- cfg/cis-1.8/master.yaml | 7 +++---- cfg/cis-1.9/master.yaml | 6 +++--- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/cfg/cis-1.7/master.yaml b/cfg/cis-1.7/master.yaml index 135824202..283e89e17 100644 --- a/cfg/cis-1.7/master.yaml +++ b/cfg/cis-1.7/master.yaml @@ -345,16 +345,15 @@ groups: text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: - bin_op: or test_items: - flag: "--enable-admission-plugins" compare: - op: have + op: has value: "DenyServiceExternalIPs" remediation: | Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + on the control plane node and add the `DenyServiceExternalIPs` plugin + to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs. 
scored: false - id: 1.2.4 diff --git a/cfg/cis-1.8/master.yaml b/cfg/cis-1.8/master.yaml index d0c1332a3..8d639daab 100644 --- a/cfg/cis-1.8/master.yaml +++ b/cfg/cis-1.8/master.yaml @@ -345,16 +345,15 @@ groups: text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" tests: - bin_op: or test_items: - flag: "--enable-admission-plugins" compare: - op: have + op: has value: "DenyServiceExternalIPs" remediation: | Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + on the control plane node and add the `DenyServiceExternalIPs` plugin + to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs. scored: false - id: 1.2.4 diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml index 50edab16c..51b9ab5d1 100644 --- a/cfg/cis-1.9/master.yaml +++ b/cfg/cis-1.9/master.yaml @@ -360,12 +360,12 @@ groups: test_items: - flag: "--enable-admission-plugins" compare: - op: have + op: has value: "DenyServiceExternalIPs" remediation: | Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + on the control plane node and add the `DenyServiceExternalIPs` plugin + to the enabled admission plugins, as such --enable-admission-plugin=DenyServiceExternalIPs. scored: false - id: 1.2.4 From 674d8e8bb7562fd806f2bfdb58fb64cccdb29293 Mon Sep 17 00:00:00 2001 From: za Date: Mon, 30 Sep 2024 13:13:10 +0700 Subject: [PATCH 241/262] Update command to build docker to run in EKS cluster (#1648) because with the previous command unable to get the argument. Issue: https://github.com/aquasecurity/kube-bench/issues/1647 Co-authored-by: za --- docs/running.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/running.md b/docs/running.md index 0ee670ff8..e3eb06fd0 100644 --- a/docs/running.md +++ b/docs/running.md @@ -92,7 +92,7 @@ aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutabilit git clone https://github.com/aquasecurity/kube-bench.git cd kube-bench aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com -docker build -t k8s/kube-bench . 
+make build-docker IMAGE_NAME=k8s/kube-bench docker tag k8s/kube-bench:latest .dkr.ecr..amazonaws.com/k8s/kube-bench:latest docker push .dkr.ecr..amazonaws.com/k8s/kube-bench:latest ``` From 89842dcaaa94039a4b95b0289c8bad0062464c68 Mon Sep 17 00:00:00 2001 From: Winnerson Kharsunai <61133554+winkrs@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:02:23 +0530 Subject: [PATCH 242/262] update dockerfile to add package findutils (#1657) --- Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index eacd2532a..b21c9925e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,9 +19,10 @@ RUN chmod +x /usr/local/bin/kubectl FROM alpine:3.20.0 AS run WORKDIR /opt/kube-bench/ -# add GNU ps for -C, -o cmd, and --no-headers support +# add GNU ps for -C, -o cmd, --no-headers support and add findutils to get GNU xargs # https://github.com/aquasecurity/kube-bench/issues/109 -RUN apk --no-cache add procps +# https://github.com/aquasecurity/kube-bench/issues/1656 +RUN apk --no-cache add procps findutils # Upgrading apk-tools to remediate CVE-2021-36159 - https://snyk.io/vuln/SNYK-ALPINE314-APKTOOLS-1533752 # https://github.com/aquasecurity/kube-bench/issues/943 From 7ea1d59bb1a6e34252e60f72f9c132e5dd180534 Mon Sep 17 00:00:00 2001 From: Winnerson Kharsunai <61133554+winkrs@users.noreply.github.com> Date: Tue, 1 Oct 2024 11:18:02 +0530 Subject: [PATCH 243/262] update audit script for cis-1.9 kubernetes policies id 5.1.6 (#1655) --- cfg/cis-1.9/policies.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml index 0adec2ea7..770d2cb28 100644 --- a/cfg/cis-1.9/policies.yaml +++ b/cfg/cis-1.9/policies.yaml @@ -133,11 +133,11 @@ groups: kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken do # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or . 
- svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n ${pod_namespace} ${pod_service_account} -o json | jq -r '.automountServiceAccountToken' | sed -e 's//notset/g' -e 's/null/notset/g') - pod_is_automountserviceaccounttoken=$(echo ${pod_is_automountserviceaccounttoken} | sed -e 's//notset/g' -e 's/null/notset/g') - if [[ "${svacc_is_automountserviceaccounttoken}" == "false" && ( "${pod_is_automountserviceaccounttoken}" == "false" || "${pod_is_automountserviceaccounttoken}" == "notset" ) ]]; then + svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's//notset/g' -e 's/null/notset/g') + pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's//notset/g' -e 's/null/notset/g') + if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then is_compliant="true" - elif [[ "${svacc_is_automountserviceaccounttoken}" == "true" && "${pod_is_automountserviceaccounttoken}" == "false" ]]; then + elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then is_compliant="true" else is_compliant="false" From d8f041a826ae984370732bdb951860e25030e833 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 3 Oct 2024 09:20:12 +0600 Subject: [PATCH 244/262] build(deps): bump alpine from 3.20.0 to 3.20.3 (#1676) Bumps alpine from 3.20.0 to 3.20.3. --- updated-dependencies: - dependency-name: alpine dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b21c9925e..7a272cc28 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/li RUN /bin/bash -c 'echo "$( Date: Thu, 3 Oct 2024 13:43:01 -0300 Subject: [PATCH 245/262] Updated KUBECTL_VERSION to 1.31.0 for fixing vulnerabilities (#1690) * Bumped Go to 1.22.7 for fixing Critical/High vulberabilities * Bumped Go to 1.22.7 for fixing Critical/High vulberabilities * Bumped kubectl version for fixing vulnerabilities * Fixed kubectl version * Update go.mod --- makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/makefile b/makefile index a0e01fb51..bcab5c3fa 100644 --- a/makefile +++ b/makefile @@ -11,7 +11,7 @@ uname := $(shell uname -s) BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ -KUBECTL_VERSION ?= 1.28.7 +KUBECTL_VERSION ?= 1.31.0 ARCH ?= $(shell go env GOARCH) ifneq ($(findstring Microsoft,$(shell uname -r)),) From c683e939686bfe4c704622704a785962e008bc63 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 4 Oct 2024 12:21:07 +0600 Subject: [PATCH 246/262] build(deps): bump github.com/aws/aws-sdk-go-v2/service/securityhub (#1696) Bumps [github.com/aws/aws-sdk-go-v2/service/securityhub](https://github.com/aws/aws-sdk-go-v2) from 1.53.1 to 1.53.3. 
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.53.1...service/iot/v1.53.3) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go-v2/service/securityhub dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 3 +-- go.sum | 40 ++-------------------------------------- 2 files changed, 3 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index 304aa3c62..38fefa67c 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( github.com/aws/aws-sdk-go-v2 v1.31.0 github.com/aws/aws-sdk-go-v2/config v1.27.37 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 github.com/fatih/color v1.16.0 github.com/golang/glog v1.2.0 github.com/magiconair/properties v1.8.7 @@ -53,7 +53,6 @@ require ( github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect - github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect diff --git a/go.sum b/go.sum index 95cda5736..f2fe64ab0 100644 --- a/go.sum +++ b/go.sum @@ -1,61 +1,29 @@ -github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA= -github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I= github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= -github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M= -github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g= github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU= github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg= -github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI= -github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0= github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE= github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 
h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1 h1:ue90LN6bFGMtmUbR+mka0kL5Up3EiNMHen7TUW3NCNA= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.1/go.mod h1:QFtYEC35t39ftJ6emZgapzdtBjGZsuR4bAd73SiG23I= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 h1:YSmEnPSWj74eOtbXG4Z2J+GTQjBrz7w2wP01isHFZwU= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3/go.mod h1:QFtYEC35t39ftJ6emZgapzdtBjGZsuR4bAd73SiG23I= github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU= github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod 
h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo= github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -104,7 +72,6 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -132,9 +99,6 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= From 3a0ccc440cb757f9645d5bc11db3be5f1d49c567 Mon Sep 17 00:00:00 2001 From: Arano-kai Date: Fri, 4 Oct 2024 10:42:56 +0300 Subject: [PATCH 247/262] fix: rh-1.0 check 4.1.3 typo (#1652) Co-authored-by: Arano-kai --- cfg/rh-1.0/node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index fb982d6f3..fb5a47324 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -48,7 +48,7 @@ groups: echo "No matching pods found on the current node." 
else # Execute the stat command - oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null fi tests: bin_op: or From e8562f2944c5bdba7d400ad934aa15992402b4e8 Mon Sep 17 00:00:00 2001 From: Matthias Muth Date: Fri, 4 Oct 2024 10:08:03 +0200 Subject: [PATCH 248/262] Extend default kubelet configlist to fit AWS EKS (#1637) - the latest default Kubernetes setup of AWS has its kubelet config path in the added location. Proposing to extend the list of scanned paths in order to make kube-bench execution more painless and "quick start like" in default setups. --- cfg/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/cfg/config.yaml b/cfg/config.yaml index cdd3fb8d1..02e0cedd0 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -170,6 +170,7 @@ node: - "/var/lib/kubelet/config.yaml" - "/var/lib/kubelet/config.yml" - "/etc/kubernetes/kubelet/kubelet-config.json" + - "/etc/kubernetes/kubelet/config.json" - "/etc/kubernetes/kubelet/config" - "/home/kubernetes/kubelet-config.yaml" - "/home/kubernetes/kubelet-config.yml" From e47725299e132018c0725233719ea48dcc725973 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 10:37:41 +0600 Subject: [PATCH 249/262] build(deps): bump gorm.io/driver/postgres from 1.5.6 to 1.5.9 (#1698) Bumps [gorm.io/driver/postgres](https://github.com/go-gorm/postgres) from 1.5.6 to 1.5.9. - [Commits](https://github.com/go-gorm/postgres/compare/v1.5.6...v1.5.9) --- updated-dependencies: - dependency-name: gorm.io/driver/postgres dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 38fefa67c..b1670c17b 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,8 @@ require ( github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.5.6 - gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde + gorm.io/driver/postgres v1.5.9 + gorm.io/gorm v1.25.10 k8s.io/apimachinery v0.29.3 k8s.io/client-go v0.29.3 ) @@ -49,7 +49,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgx/v5 v5.5.4 // indirect + github.com/jackc/pgx/v5 v5.5.5 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect diff --git a/go.sum b/go.sum index f2fe64ab0..f4ef9ecc8 100644 --- a/go.sum +++ b/go.sum @@ -91,8 +91,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= -github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -277,10 +277,10 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.5.6 h1:ydr9xEd5YAM0vxVDY0X139dyzNz10spDiDlC7+ibLeU= -gorm.io/driver/postgres v1.5.6/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= -gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg= -gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= +gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= +gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= +gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= From a15e8acaa3312ac7c99c32531980b0be6b3a9014 Mon Sep 17 00:00:00 2001 From: Abubakr-Sadik Nii Nai Davis Date: Fri, 11 Oct 2024 04:49:35 
+0000 Subject: [PATCH 250/262] Add GKE 1.6 CIS benchmark for GCP environment (#1672) * Add config entries for GKE 1.6 controls * Add gke1.6 control plane recommendations * Add gke-1.6.0 worker node recommendations * Add gke-1.6.0 policy recommendations * Add managed services and policy recommendation * Add master recommendations * Fix formatting across gke-1.6.0 files * Add gke-1.6.0 benchmark selection based on k8s version * Workaround: hardcode kubelet config path for gke-1.6.0 * Fix tests for makeIPTablesUtilChaings * Change scored field for all node tests to true * Fix kubelet file permission to check for --------- Co-authored-by: afdesk --- cfg/config.yaml | 7 + cfg/gke-1.6.0/config.yaml | 9 + cfg/gke-1.6.0/controlplane.yaml | 20 + cfg/gke-1.6.0/managedservices.yaml | 617 +++++++++++++++++++++++++++++ cfg/gke-1.6.0/master.yaml | 6 + cfg/gke-1.6.0/node.yaml | 506 +++++++++++++++++++++++ cfg/gke-1.6.0/policies.yaml | 238 +++++++++++ cmd/util.go | 2 + docs/architecture.md | 1 + docs/platforms.md | 1 + docs/running.md | 3 +- 11 files changed, 1409 insertions(+), 1 deletion(-) create mode 100644 cfg/gke-1.6.0/config.yaml create mode 100644 cfg/gke-1.6.0/controlplane.yaml create mode 100644 cfg/gke-1.6.0/managedservices.yaml create mode 100644 cfg/gke-1.6.0/master.yaml create mode 100644 cfg/gke-1.6.0/node.yaml create mode 100644 cfg/gke-1.6.0/policies.yaml diff --git a/cfg/config.yaml b/cfg/config.yaml index 02e0cedd0..d5d170bbd 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -288,6 +288,7 @@ version_mapping: "eks-1.2.0": "eks-1.2.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" + "gke-1.6.0": "gke-1.6.0" "ocp-3.10": "rh-0.7" "ocp-3.11": "rh-0.7" "ocp-4.0": "rh-1.0" @@ -380,6 +381,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-1.6.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "eks-1.0.1": - "master" - "node" diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml new file mode 100644 index 000000000..e15e43f48 --- /dev/null +++ b/cfg/gke-1.6.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +node: + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + kubelet: + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml new file mode 100644 index 000000000..011d86075 --- /dev/null +++ b/cfg/gke-1.6.0/controlplane.yaml @@ -0,0 +1,20 @@ +--- +controls: +version: "gke-1.6.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 5.8.1. 
+ scored: false diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml new file mode 100644 index 000000000..e82320d67 --- /dev/null +++ b/cfg/gke-1.6.0/managedservices.yaml @@ -0,0 +1,617 @@ +--- +controls: +version: "gke-1.6.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning is enabled (Automated)" + type: "manual" + remediation: | + For Images Hosted in GCR: + Using Command Line: + + gcloud services enable containeranalysis.googleapis.com + + For Images Hosted in AR: + Using Command Line: + + gcloud services enable containerscanning.googleapis.com + scored: false + + - id: 5.1.2 + text: "Minimize user access to Container Image repositories (Manual)" + type: "manual" + remediation: | + For Images Hosted in AR: + Using Command Line: + + gcloud artifacts repositories set-iam-policy \ + --location + + To learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant + + For Images Hosted in GCR: + Using Command Line: + To change roles at the GCR bucket level: + Firstly, run the following if read permissions are required: + + gsutil iam ch ::objectViewer gs://artifacts..appspot.com + + Then remove the excessively privileged role (Storage Admin / Storage Object + Admin / Storage Object Creator) using: + + gsutil iam ch -d :: gs://artifacts..appspot.com + + where: + can be one of the following: + user, if the is a Google account. + serviceAccount, if specifies a Service account. + can be one of the following: + a Google account (for example, someone@example.com). + a Cloud IAM service account. + + To modify roles defined at the project level and subsequently inherited within the GCR + bucket, or the Service Account User role, extract the IAM policy file, modify it + accordingly and apply it using: + + gcloud projects set-iam-policy + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Container Image repositories (Manual)" + type: "manual" + remediation: | + For Images Hosted in AR: + Using Command Line: + Add artifactregistry.reader role + + gcloud artifacts repositories add-iam-policy-binding \ + --location= \ + --member='serviceAccount:' \ + --role='roles/artifactregistry.reader' + + Remove any roles other than artifactregistry.reader + + gcloud artifacts repositories remove-iam-policy-binding \ + --location \ + --member='serviceAccount:' \ + --role='' + + For Images Hosted in GCR: + For an account explicitly granted to the bucket: + Firstly add read access to the Kubernetes Service Account: + + gsutil iam ch ::objectViewer gs://artifacts..appspot.com + + where: + can be one of the following: + user, if the is a Google account. + serviceAccount, if specifies a Service account. + can be one of the following: + a Google account (for example, someone@example.com). + a Cloud IAM service account. 
+ + Then remove the excessively privileged role (Storage Admin / Storage Object + Admin / Storage Object Creator) using: + + gsutil iam ch -d :: gs://artifacts..appspot.com + + For an account that inherits access to the GCR Bucket through Project level + permissions, modify the Projects IAM policy file accordingly, then upload it using: + + gcloud projects set-iam-policy + scored: false + + - id: 5.1.4 + text: "Ensure only trusted container images are used (Manual)" + type: "manual" + remediation: | + Using Command Line: + Update the cluster to enable Binary Authorization: + + gcloud container cluster update --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: + https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance. + + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Ensure GKE clusters are not running using the Compute Engine default service account (Automated))" + type: "manual" + remediation: | + Using Command Line: + To create a minimally privileged service account: + + gcloud iam service-accounts create \ + --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=gcloud iam service-accounts list \ + --format='value(email)' --filter='displayName:GKE Node Service Account' + + Grant the following roles to the service account: + + export PROJECT_ID=gcloud config get-value project + gcloud projects add-iam-policy-binding --member \ + serviceAccount: --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding --member \ + serviceAccount: --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding --member \ + serviceAccount: --role roles/logging.logWriter + + To create a new Node pool using the Service account, run the following command: + + gcloud container node-pools create \ + --service-account=@.iam.gserviceaccount.com \ + --cluster= --zone + + Note: The workloads will need to be migrated to the new Node pool, and the old node + pools that use the default service account should be deleted to complete the + remediation. + scored: false + + - id: 5.2.2 + text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" + type: "manual" + remediation: | + Using Command Line: + + gcloud container clusters update --zone \ + --workload-pool .svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER. + + Then, modify existing Node pools to enable GKE_METADATA_SERVER: + + gcloud container node-pools update --cluster \ + --zone --workload-metadata=GKE_METADATA + + Workloads may need to be modified in order for them to use Workload Identity as + described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity. + Also consider the effects on the availability of hosted workloads as Node pools + are updated. It may be more appropriate to create new Node Pools. 
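As a minimal sketch of the workload-side change referenced above (all names here are hypothetical, not taken from the benchmark), the Kubernetes ServiceAccount used by a workload is annotated with the Google service account it should impersonate once Workload Identity is enabled:

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: app-ksa                # hypothetical Kubernetes service account
      namespace: default
      annotations:
        # hypothetical GSA; it also needs an IAM binding granting roles/iam.workloadIdentityUser
        # to the member serviceAccount:PROJECT_ID.svc.id.goog[default/app-ksa]
        iam.gke.io/gcp-service-account: app-gsa@my-project.iam.gserviceaccount.com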
+ scored: false + + - id: 5.3 + text: "Cloud Key Management Service (Cloud KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)" + type: "manual" + remediation: | + To create a key: + Create a key ring: + + gcloud kms keyrings create --location --project \ + + + Create a key: + + gcloud kms keys create --location --keyring \ + --purpose encryption --project + + Grant the Kubernetes Engine Service Agent service account the Cloud KMS + CryptoKey Encrypter/Decrypter role: + + gcloud kms keys add-iam-policy-binding --location \ + --keyring --member serviceAccount: \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project + + To create a new cluster with Application-layer Secrets Encryption: + + gcloud container clusters create --cluster-version=latest \ + --zone \ + --database-encryption-key projects//locations//keyRings//cryptoKeys/ \ + --project + + To enable on an existing cluster: + + gcloud container clusters update --zone \ + --database-encryption-key projects//locations//keyRings//cryptoKeys/ \ + --project + scored: false + + - id: 5.4 + text: "Node Metadata" + checks: + - id: 5.4.1 + text: "Ensure the GKE Metadata Server is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + + gcloud container clusters update --identity-namespace=.svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to --workload- + metadata-from-node=GKE_METADATA_SERVER. + + To modify an existing Node pool to enable GKE Metadata Server: + + gcloud container node-pools update --cluster= \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + Workloads may need modification in order for them to use Workload Identity as + described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity. + scored: false + + - id: 5.5 + text: "Node Configuration and Maintenance" + checks: + - id: 5.5.1 + text: "Ensure Container-Optimized OS (cos_containerd) is used for GKE node images (Automated)" + type: "manual" + remediation: | + Using Command Line: + To set the node image to cos for an existing cluster's Node pool: + + gcloud container clusters upgrade --image-type cos_containerd \ + --zone --node-pool + scored: false + + - id: 5.5.2 + text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-repair for an existing cluster's Node pool: + + gcloud container node-pools update --cluster \ + --zone --enable-autorepair + scored: false + + - id: 5.5.3 + text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-upgrade for an existing cluster's Node pool, run the following + command: + + gcloud container node-pools update --cluster \ + --zone --enable-autoupgrade + scored: false + + - id: 5.5.4 + text: "When creating New Clusters - Automate GKE version management using Release Channels (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster by running the following command: + + gcloud container clusters create --zone \ + --release-channel + + where is stable or regular, according to requirements. 
+ scored: false + + - id: 5.5.5 + text: "Ensure Shielded GKE Nodes are Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To migrate an existing cluster, the flag --enable-shielded-nodes needs to be + specified in the cluster update command: + + gcloud container clusters update --zone \ + --enable-shielded-nodes + scored: false + + - id: 5.5.6 + text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud container node-pools create --cluster \ + --zone --shielded-integrity-monitoring + + Workloads from existing non-conforming Node pools will need to be migrated to the + newly created Node pool, then delete non-conforming Node pools to complete the + remediation + scored: false + + - id: 5.5.7 + text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Secure Boot enabled, run the following + command: + + gcloud container node-pools create --cluster \ + --zone --shielded-secure-boot + + Workloads will need to be migrated from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools. + scored: false + + - id: 5.6 + text: "Cluster Networking" + checks: + - id: 5.6.1 + text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" + type: "manual" + remediation: | + Using Command Line: + 1. Find the subnetwork name associated with the cluster. + + gcloud container clusters describe \ + --region - -format json | jq '.subnetwork' + + 2. Update the subnetwork to enable VPC Flow Logs. + gcloud compute networks subnets update --enable-flow-logs + scored: false + + - id: 5.6.2 + text: "Ensure use of VPC-native clusters (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Alias IP on a new cluster, run the following command: + + gcloud container clusters create --zone \ + --enable-ip-alias + + If using Autopilot configuration mode: + + gcloud container clusters create-auto \ + --zone + scored: false + + - id: 5.6.3 + text: "Ensure Control Plane Authorized Networks is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Control Plane Authorized Networks for an existing cluster, run the following + command: + + gcloud container clusters update --zone \ + --enable-master-authorized-networks + + Along with this, you can list authorized networks using the --master-authorized-networks + flag which contains a list of up to 20 external networks that are allowed to + connect to your cluster's control plane through HTTPS. You provide these networks as + a comma-separated list of addresses in CIDR notation (such as 90.90.100.0/24). + scored: false + + - id: 5.6.4 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + Create a cluster with a Private Endpoint enabled and Public Access disabled by including + the --enable-private-endpoint flag within the cluster create command: + + gcloud container clusters create --enable-private-endpoint + + Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias + and --master-ipv4-cidr=. 
+ scored: false + + - id: 5.6.5 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + To create a cluster with Private Nodes enabled, include the --enable-private-nodes + flag within the cluster create command: + + gcloud container clusters create --enable-private-nodes + + Setting this flag also requires the setting of --enable-ip-alias and + --master-ipv4-cidr=. + scored: false + + - id: 5.6.6 + text: "Consider firewalling GKE worker nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + Use the following command to generate firewall rules, setting the variables as + appropriate: + + gcloud compute firewall-rules create \ + --network --priority --direction \ + --action --target-tags \ + --target-service-accounts \ + --source-ranges --source-tags \ + --source-service-accounts \ + --destination-ranges --rules + scored: false + + - id: 5.6.7 + text: "Ensure use of Google-managed SSL Certificates (Automated)" + type: "manual" + remediation: | + If services of type:LoadBalancer are discovered, consider replacing the Service with + an Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the + instructions as listed at: https://cloud.google.com/kubernetes-engine/docs/how- + to/managed-certs. + scored: false + + - id: 5.7 + text: "Logging" + checks: + - id: 5.7.1 + text: "Ensure Logging and Cloud Monitoring is Enabled (Automated)" + type: "manual" + remediation: | + To enable Logging for an existing cluster, run the following command: + gcloud container clusters update --zone \ + --logging= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging + for a list of available components for logging. + + To enable Cloud Monitoring for an existing cluster, run the following command: + gcloud container clusters update --zone \ + --monitoring= + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#-- + monitoring for a list of available components for Cloud Monitoring. + scored: false + + - id: 5.7.2 + text: "Enable Linux auditd logging (Manual)" + type: "manual" + remediation: | + Using Command Line: + Download the example manifests: + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml + + Edit the example manifests if needed. Then, deploy them: + kubectl apply -f cos-auditd-logging.yaml + + Verify that the logging Pods have started. If a different Namespace was defined in the + manifests, replace cos-auditd with the name of the namespace being used: + kubectl get pods --namespace=cos-auditd + scored: false + + - id: 5.8 + text: "Authentication and Authorization" + checks: + - id: 5.8.1 + text: "Ensure authentication using Client Certificates is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster without a Client Certificate: + gcloud container clusters create [CLUSTER_NAME] \ + --no-issue-client-certificate + scored: false + + - id: 5.8.2 + text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the G Suite Groups instructions at: https://cloud.google.com/kubernetes- + engine/docs/how-to/role-based-access-control#google-groups-for-gke. 
+ + Then, create a cluster with: + gcloud container clusters create --security-group + + Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that + reference the G Suite Groups. + scored: false + + - id: 5.8.3 + text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable Legacy Authorization for an existing cluster, run the following command: + gcloud container clusters update --zone \ + --no-enable-legacy-authorization + scored: false + + - id: 5.9 + text: "Storage" + checks: + - id: 5.9.1 + text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the instructions detailed at: https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek. + scored: false + + - id: 5.9.2 + text: "Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new node pool using customer-managed encryption keys for the node boot + disk, of either pd-standard or pd-ssd: + gcloud container node-pools create --disk-type \ + --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + + Create a cluster using customer-managed encryption keys for the node boot disk, of + either pd-standard or pd-ssd: + gcloud container clusters create --disk-type \ + --boot-disk-kms-key projects//locations//keyRings//cryptoKeys/ + scored: false + + - id: 5.10 + text: "Other Cluster Configurations" + checks: + - id: 5.10.1 + text: "Ensure Kubernetes Web UI is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable the Kubernetes Dashboard on an existing cluster, run the following + command: + gcloud container clusters update --zone \ + --update-addons=KubernetesDashboard=DISABLED + scored: false + + - id: 5.10.2 + text: "Ensure that Alpha clusters are not used for production workloads (Automated)" + type: "manual" + remediation: | + Using Command Line: + Upon creating a new cluster + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + Do not use the --enable-kubernetes-alpha argument. + scored: false + + - id: 5.10.3 + text: "Consider GKE Sandbox for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable GKE Sandbox on an existing cluster, a new Node pool must be created, + which can be done using: + gcloud container node-pools create --zone \ + --cluster --image-type=cos_containerd --sandbox="type=gvisor" + scored: false + + - id: 5.10.4 + text: "Ensure use of Binary Authorization (Automated)" + type: "manual" + remediation: | + Using Command Line: + Update the cluster to enable Binary Authorization: + gcloud container cluster update --zone \ + --binauthz-evaluation-mode= + + Example: + gcloud container clusters update $CLUSTER_NAME --zone $COMPUTE_ZONE \ + --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE + + See: https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode + for more details around the evaluation modes available. + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: + https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance. 
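For orientation, a policy file of the kind referenced above can be quite small; the following is only a sketch with assumed values (the evaluation and enforcement modes should be chosen per the policy reference, and a REQUIRE_ATTESTATION rule would additionally list attestor references):

    # sketch of a minimal Binary Authorization policy (illustrative values)
    globalPolicyEvaluationMode: ENABLE
    defaultAdmissionRule:
      evaluationMode: ALWAYS_DENY
      enforcementMode: ENFORCED_BLOCK_AND_AUDIT_LOG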
+ + Import the policy file into Binary Authorization: + gcloud container binauthz policy import + scored: false + + - id: 5.10.5 + text: "Enable Security Posture (Manual)" + type: "manual" + remediation: | + Enable security posture via the UI, gCloud or API. + https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration + scored: false diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml new file mode 100644 index 000000000..9686bf2f8 --- /dev/null +++ b/cfg/gke-1.6.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "gke-1.6.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml new file mode 100644 index 000000000..bac80720e --- /dev/null +++ b/cfg/gke-1.6.0/node.yaml @@ -0,0 +1,506 @@ +--- +controls: +version: "gke-1.6.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + + chmod 644 $proxykubeconfig + scored: true + + - id: 3.1.2 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example: + + chown root:root $proxykubeconfig + scored: true + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 600 (Manual)" + audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c permissions=%a /home/kubernetes/kubelet-config.yaml; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the kubelet config file location) + + chmod 600 /home/kubernetes/kubelet-config.yaml + scored: true + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c %U:%G /home/kubernetes/kubelet-config.yaml; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + + chown root:root /home/kubernetes/kubelet-config.yaml + scored: true + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /home/kubernetes/kubelet-config.yaml + + Disable Anonymous Authentication by setting the following parameter: + + "authentication": { "anonymous": { "enabled": false } } + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --anonymous-auth=false + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Enable Webhook Authentication by setting the following parameter: + + "authentication": { "webhook": { "enabled": true } } + + Next, set the Authorization Mode to Webhook by setting the following parameter: + + "authorization": { "mode": "Webhook" } + + Finer detail of the authentication and authorization fields can be found in the + Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf.
Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --authentication-token-webhook + --authorization-mode=Webhook + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.3 + text: "Ensure that a Client CA File is Configured (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Configure the client certificate authority file by setting the following parameter + appropriately: + + "authentication": { "x509": {"clientCAFile": } }" + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --client-ca-file= + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. 
If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + bin_op: or + remediation: | + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0 + + "readOnlyPort": 0 + + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --read-only-port=0 + + For each remediation: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet-config.yaml and set the below parameter to a non-zero + value in the format of #h#m#s + + "streamingConnectionIdleTimeout": "4h0m0s" + + You should ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not + specify a --streaming-connection-idle-timeout argument because it would + override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --streaming-connection-idle-timeout=4h0m0s + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "streamingConnectionIdleTimeout": by extracting the live configuration from the + nodes running kubelet. 
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to + true + + "makeIPTablesUtilChains": true + + Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf + does not set the --make-iptables-util-chains argument because that would + override your Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --make-iptables-util-chains=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "makeIPTablesUtilChains": true by extracting the live configuration from the nodes + running kubelet. + + **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.7 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
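Taken together, the kubelet config file settings audited by checks 3.2.1 through 3.2.9 (the last two of which follow below) can be expressed in a single file; the following fragment is only an illustrative sketch, and the timeout value and client CA path are assumptions rather than values mandated by the benchmark:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false                                               # 3.2.1
      webhook:
        enabled: true                                                # 3.2.2
      x509:
        clientCAFile: /etc/srv/kubernetes/pki/ca-certificates.crt   # 3.2.3 (assumed path)
    authorization:
      mode: Webhook                                                  # 3.2.2
    readOnlyPort: 0                                                  # 3.2.4
    streamingConnectionIdleTimeout: 4h0m0s                           # 3.2.5 (any non-zero duration)
    makeIPTablesUtilChains: true                                     # 3.2.6
    eventRecordQPS: 0                                                # 3.2.7
    rotateCertificates: true                                         # 3.2.8
    featureGates:
      RotateKubeletServerCertificate: true                           # 3.2.9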
+ + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + + Based on your system, restart the kubelet service. For example: + + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.yaml file + /etc/kubernetes/kubelet/kubelet-config.yaml and set the below parameter to + true + + "rotateCertificates": true + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates + executable argument to false because this would override the Kubelet + config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-certificates=true + scored: true + + - id: 3.2.9 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet-config.yaml and set the below parameter to true + + "featureGates": { + "RotateKubeletServerCertificate":true + }, + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set + the --rotate-kubelet-server-certificate executable argument to false because + this would override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-kubelet-server-certificate=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "RotateKubeletServerCertificate": by extracting the live configuration from the + nodes running kubelet.
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediation methods: + Restart the kubelet service and check status. The example below is for when using + systemctl to manage services: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml new file mode 100644 index 000000000..333335536 --- /dev/null +++ b/cfg/gke-1.6.0/policies.yaml @@ -0,0 +1,238 @@ +--- +controls: +version: "gke-1.6.0" +id: 4 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Automated)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Ensure that default service accounts are not actively used (Automated)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific + access to the Kubernetes API server. + + Modify the configuration of each default service account to include this value + + automountServiceAccountToken: false + scored: false + + - id: 4.1.5 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.6 + text: "Avoid use of system:masters group (Automated)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.7 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.1.8 + text: "Avoid bindings to system:anonymous (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings and rolebindings to the user system:anonymous. 
+ Check if they are used and review the permissions associated with the binding using the + commands in the Audit section above or refer to GKE documentation + (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing unsafe bindings with an authenticated, user-defined group. + Where possible, bind to non-default, user-defined groups with least-privilege roles. + + If there are any unsafe bindings to the user system:anonymous, proceed to delete them + after consideration for cluster operations with only necessary, safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.9 + text: "Avoid non-default bindings to system:unauthenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:unauthenticated. Check if they are used and review the permissions + associated with the binding using the commands in the Audit section above or refer to + GKE documentation (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing non-default, unsafe bindings with an authenticated, user- + defined group. Where possible, bind to non-default, user-defined groups with least- + privilege roles. + + If there are any non-default, unsafe bindings to the group system:unauthenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.10 + text: "Avoid non-default bindings to system:authenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:authenticated. Check if they are used and review the permissions associated + with the binding using the commands in the Audit section above or refer to GKE + documentation. + + Strongly consider replacing non-default, unsafe bindings with an authenticated, user- + defined group. Where possible, bind to non-default, user-defined groups with least- + privilege roles. + + If there are any non-default, unsafe bindings to the group system:authenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. (Manual)" + type: "manual" + remediation: | + Ensure that Pod Security Admission is in place for every namespace which contains + user workloads. + Run the following command to enforce the Baseline profile in a namespace: + + kubectl label namespace pod-security.kubernetes.io/enforce=baseline + scored: false + + - id: 4.3 + text: "Network Policies and CNI" + checks: + - id: 4.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin + will be updated. 
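The namespace labelling command in recommendation 4.2.1 above can also be expressed declaratively; a sketch with a hypothetical namespace name follows (the warn and audit labels are optional additions, not required by the check):

    apiVersion: v1
    kind: Namespace
    metadata:
      name: my-app                                      # hypothetical namespace
      labels:
        pod-security.kubernetes.io/enforce: baseline    # Baseline profile enforced (4.2.1)
        pod-security.kubernetes.io/warn: restricted     # optional: warn against the stricter profile
        pod-security.kubernetes.io/audit: restricted    # optional: audit against the stricter profile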
See Recommendation 5.6.7. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Automated)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as needed. + See: https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy + for more information. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Automated)" + type: "manual" + remediation: | + if possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: + - id: 4.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + Also see recommendation 5.10.4. + scored: false + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Ensure that the seccomp profile is set to RuntimeDefault in your pod definitions (Automated)" + type: "manual" + remediation: | + Use security context to enable the RuntimeDefault seccomp profile in your pod + definitions. An example is as below: + + { + "namespace": "kube-system", + "name": "metrics-server-v0.7.0-dbcc8ddf6-gz7d4", + "seccompProfile": "RuntimeDefault" + } + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Google Container- + Optimized OS Benchmark. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Automated)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cmd/util.go b/cmd/util.go index 95c0b2639..275de2326 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -494,6 +494,8 @@ func getPlatformBenchmarkVersion(platform Platform) string { switch platform.Version { case "1.15", "1.16", "1.17", "1.18", "1.19": return "gke-1.0" + case "1.29", "1.30", "1.31": + return "gke-1.6.0" default: return "gke-1.2.0" } diff --git a/docs/architecture.md b/docs/architecture.md index ef17ca6f6..c65978f0a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -25,6 +25,7 @@ The following table shows the valid targets based on the CIS Benchmark version. 
| cis-1.9 | master, controlplane, node, etcd, policies | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | controlplane, node, policies, managedservices | +| gke-1.6.0 | controlplane, node, policies, managedservices | | eks-1.0.1 | controlplane, node, policies, managedservices | | eks-1.1.0 | controlplane, node, policies, managedservices | | eks-1.2.0 | controlplane, node, policies, managedservices | diff --git a/docs/platforms.md b/docs/platforms.md index e907609ad..d6fbcf712 100644 --- a/docs/platforms.md +++ b/docs/platforms.md @@ -20,6 +20,7 @@ Some defined by other hardenening guides. | CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 | | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | +| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE | | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | | CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | | CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS | diff --git a/docs/running.md b/docs/running.md index e3eb06fd0..c482a78b6 100644 --- a/docs/running.md +++ b/docs/running.md @@ -154,8 +154,9 @@ oc apply -f job.yaml | ------------- | ----------------------------------------------------------- | | gke-1.0 | master, controlplane, node, etcd, policies, managedservices | | gke-1.2.0 | master, controlplane, node, policies, managedservices | +| gke-1.6.0 | master, controlplane, node, policies, managedservices | -kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0` or `--benchmark gke-1.2.0` when you run the `kube-bench` command. +kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0`, `--benchmark gke-1.2.0` or `--benchmark gke-1.6.0` when you run the `kube-bench` command. To run the benchmark as a job in your GKE cluster apply the included `job-gke.yaml`. From 1d8f80e846c24e81d90a66ba0002c245921bd0f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 11:14:14 +0600 Subject: [PATCH 251/262] build(deps): bump github.com/golang/glog from 1.2.0 to 1.2.2 (#1702) Bumps [github.com/golang/glog](https://github.com/golang/glog) from 1.2.0 to 1.2.2. - [Release notes](https://github.com/golang/glog/releases) - [Commits](https://github.com/golang/glog/compare/v1.2.0...v1.2.2) --- updated-dependencies: - dependency-name: github.com/golang/glog dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b1670c17b..937a791e0 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/aws/aws-sdk-go-v2/config v1.27.37 github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 github.com/fatih/color v1.16.0 - github.com/golang/glog v1.2.0 + github.com/golang/glog v1.2.2 github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index f4ef9ecc8..c559d0258 100644 --- a/go.sum +++ b/go.sum @@ -55,8 +55,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= From fa478ce238fd1f8063d1cce42077fa4bf32102c7 Mon Sep 17 00:00:00 2001 From: Omar kamoun <35850565+Nezz7@users.noreply.github.com> Date: Wed, 16 Oct 2024 06:50:10 +0100 Subject: [PATCH 252/262] fix: correct TLSCipherSuites to tlsCipherSuites (#1703) --- cfg/ack-1.0/node.yaml | 2 +- cfg/cis-1.20/node.yaml | 2 +- cfg/cis-1.23/node.yaml | 2 +- cfg/cis-1.24-microk8s/node.yaml | 2 +- cfg/cis-1.24/node.yaml | 2 +- cfg/cis-1.5/node.yaml | 2 +- cfg/cis-1.6/node.yaml | 2 +- cfg/cis-1.7/node.yaml | 2 +- cfg/cis-1.8/node.yaml | 2 +- cfg/cis-1.9/node.yaml | 2 +- cfg/k3s-cis-1.23/node.yaml | 2 +- cfg/k3s-cis-1.24/node.yaml | 2 +- cfg/k3s-cis-1.7/node.yaml | 2 +- cfg/k3s-cis-1.8/node.yaml | 2 +- cfg/rke-cis-1.23/node.yaml | 2 +- cfg/rke-cis-1.24/node.yaml | 2 +- cfg/rke-cis-1.7/node.yaml | 2 +- cfg/rke2-cis-1.23/node.yaml | 2 +- cfg/rke2-cis-1.24/node.yaml | 2 +- cfg/rke2-cis-1.7/node.yaml | 2 +- cfg/tkgi-1.2.53/node.yaml | 2 +- integration/testdata/Expected_output.data | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) diff --git a/cfg/ack-1.0/node.yaml b/cfg/ack-1.0/node.yaml index 3872880c7..c7da0fbe6 100644 --- a/cfg/ack-1.0/node.yaml +++ b/cfg/ack-1.0/node.yaml @@ -377,7 +377,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to 
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.20/node.yaml b/cfg/cis-1.20/node.yaml index f2afb76fc..c6bc46e57 100644 --- a/cfg/cis-1.20/node.yaml +++ b/cfg/cis-1.20/node.yaml @@ -448,7 +448,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml index affdbf99f..08d941acc 100644 --- a/cfg/cis-1.23/node.yaml +++ b/cfg/cis-1.23/node.yaml @@ -447,7 +447,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.24-microk8s/node.yaml b/cfg/cis-1.24-microk8s/node.yaml index 55dc5c670..8da12b3c0 100644 --- a/cfg/cis-1.24-microk8s/node.yaml +++ b/cfg/cis-1.24-microk8s/node.yaml @@ -445,7 +445,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml index 3649428af..20f75a072 100644 --- a/cfg/cis-1.24/node.yaml +++ b/cfg/cis-1.24/node.yaml @@ -451,7 +451,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml index 8823598f2..4f219c7fb 100644 --- a/cfg/cis-1.5/node.yaml +++ b/cfg/cis-1.5/node.yaml @@ -472,7 +472,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.6/node.yaml b/cfg/cis-1.6/node.yaml index 157eecce5..f6e41f0ee 100644 --- a/cfg/cis-1.6/node.yaml +++ b/cfg/cis-1.6/node.yaml @@ -452,7 +452,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml index de1c29c2d..29d6816b6 100644 --- a/cfg/cis-1.7/node.yaml +++ b/cfg/cis-1.7/node.yaml @@ -429,7 +429,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.8/node.yaml b/cfg/cis-1.8/node.yaml index 66e7697ec..ac304e6f2 100644 --- a/cfg/cis-1.8/node.yaml +++ b/cfg/cis-1.8/node.yaml @@ -429,7 +429,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.9/node.yaml b/cfg/cis-1.9/node.yaml index bd7cc1f68..d4fa57a60 100644 --- a/cfg/cis-1.9/node.yaml +++ b/cfg/cis-1.9/node.yaml @@ -429,7 +429,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml index bedfe928a..9c20b229e 100644 --- a/cfg/k3s-cis-1.23/node.yaml +++ b/cfg/k3s-cis-1.23/node.yaml @@ -477,7 +477,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml index 2b6c0dfa8..a9f1e0386 100644 --- a/cfg/k3s-cis-1.24/node.yaml +++ b/cfg/k3s-cis-1.24/node.yaml @@ -417,7 +417,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tlsCipherSuites` to kubelet-arg: - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" or to a subset of these values. diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml index bb23b5e14..b873aea68 100644 --- a/cfg/k3s-cis-1.7/node.yaml +++ b/cfg/k3s-cis-1.7/node.yaml @@ -397,7 +397,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tlsCipherSuites` to kubelet-arg: - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" or to a subset of these values. 
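For reference, the corrected camelCase key from this patch lands in a KubeletConfiguration file roughly as follows — an illustrative sketch only, not part of the patch itself; the cipher list may be any subset of the values listed in the remediation text above:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # camelCase field name, as the kubelet config file format expects
    tlsCipherSuites:
      - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
      - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
      - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384

On K3s the equivalent setting is passed through kubelet-arg in /etc/rancher/k3s/config.yaml, as shown in the k3s-cis remediation text above.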
diff --git a/cfg/k3s-cis-1.8/node.yaml b/cfg/k3s-cis-1.8/node.yaml index eef9414de..7a238a1a8 100644 --- a/cfg/k3s-cis-1.8/node.yaml +++ b/cfg/k3s-cis-1.8/node.yaml @@ -397,7 +397,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `TLSCipherSuites` to + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tlsCipherSuites` to kubelet-arg: - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" or to a subset of these values. diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml index ea13f945d..a509ed743 100644 --- a/cfg/rke-cis-1.23/node.yaml +++ b/cfg/rke-cis-1.23/node.yaml @@ -453,7 +453,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml index ca5dcc1ad..653f1b754 100644 --- a/cfg/rke-cis-1.24/node.yaml +++ b/cfg/rke-cis-1.24/node.yaml @@ -446,7 +446,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/rke-cis-1.7/node.yaml b/cfg/rke-cis-1.7/node.yaml index ff5731732..abc9d12af 100644 --- a/cfg/rke-cis-1.7/node.yaml +++ b/cfg/rke-cis-1.7/node.yaml @@ -436,7 +436,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml index bbb015f32..b7d97a0e3 100644 --- a/cfg/rke2-cis-1.23/node.yaml +++ b/cfg/rke2-cis-1.23/node.yaml @@ -453,7 +453,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml index 8ecce99c0..dcbe85793 100644 --- a/cfg/rke2-cis-1.24/node.yaml +++ b/cfg/rke2-cis-1.24/node.yaml @@ -453,7 +453,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/rke2-cis-1.7/node.yaml b/cfg/rke2-cis-1.7/node.yaml index 155aef7d4..a390e39a5 100644 --- a/cfg/rke2-cis-1.7/node.yaml +++ b/cfg/rke2-cis-1.7/node.yaml @@ -432,7 +432,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/tkgi-1.2.53/node.yaml b/cfg/tkgi-1.2.53/node.yaml index 8e0f09591..9fafab944 100644 --- a/cfg/tkgi-1.2.53/node.yaml +++ b/cfg/tkgi-1.2.53/node.yaml @@ -405,7 +405,7 @@ groups: op: regex value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256) remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/integration/testdata/Expected_output.data b/integration/testdata/Expected_output.data index d0cfe93fc..934f90ec6 100644 --- a/integration/testdata/Expected_output.data +++ b/integration/testdata/Expected_output.data @@ -281,7 +281,7 @@ Based on your system, restart the kubelet service. For example: systemctl daemon-reload systemctl restart kubelet.service -4.2.13 If using a Kubelet config file, edit the file to set TLSCipherSuites: to +4.2.13 If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file From c5dc28ee6f13e8f7250146ad9adb285588ec79e9 Mon Sep 17 00:00:00 2001 From: afdesk Date: Wed, 16 Oct 2024 19:48:17 +0600 Subject: [PATCH 253/262] release: prepare v0.9.1 (#1705) --- job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/job.yaml b/job.yaml index 6c7a61bea..1a22637ff 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.8.0 + image: docker.io/aquasec/kube-bench:v0.9.1 name: kube-bench volumeMounts: - name: var-lib-cni From e9ea1dbb744748797b1bdcdebf760a39c0d8cae9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 15:47:43 +0600 Subject: [PATCH 254/262] build(deps): bump golangci/golangci-lint-action from 4 to 5 (#1604) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4 to 5. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v4...v5) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4d344b9b2..f3b9a19a9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,7 @@ jobs: - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v5 with: version: v1.57.2 args: --verbose From 0e3dbfa985c0dcc66b5e5155fd96932c671bdc6c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Oct 2024 23:30:31 +0600 Subject: [PATCH 255/262] build(deps): bump docker/build-push-action from 5 to 6 (#1631) Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/publish.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 027941c5e..ce8861a79 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -53,7 +53,7 @@ jobs: echo "KUBECTL_VERSION=$(grep -oP '^KUBECTL_VERSION\s*\?=\s*\K.*' makefile)" >> $GITHUB_ENV - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -72,7 +72,7 @@ jobs: - name: Build and push ubi image - Docker/ECR id: docker_build_ubi - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -92,7 +92,7 @@ jobs: - name: Build and push fips ubi image - Docker/ECR id: docker_build_fips_ubi - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x From d5ba5edca046a4751278bcb9b6f590c24085327e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 11:30:08 +0600 Subject: [PATCH 256/262] build(deps): bump actions/setup-python from 4 to 5 (#1536) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 4 to 5. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/mkdocs-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index 91c2954bf..898e7f13d 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -20,7 +20,7 @@ jobs: with: fetch-depth: 0 persist-credentials: true - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - run: | From 55688950955d2faeac58b2a46aa708393cb877f2 Mon Sep 17 00:00:00 2001 From: afdesk Date: Thu, 24 Oct 2024 12:40:41 +0600 Subject: [PATCH 257/262] chore: add go toolchain version (#1710) * chore: add go toolchain version * bump up toolchain to 1.22.7 --- go.mod | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 937a791e0..0bda695d0 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/aquasecurity/kube-bench -go 1.22 +go 1.22.0 + +toolchain go1.22.7 require ( github.com/aws/aws-sdk-go-v2 v1.31.0 From ddb586d441b4524b238fe991288091a0a5d202bd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 12:51:14 +0600 Subject: [PATCH 258/262] build(deps): bump k8s.io/apimachinery from 0.29.3 to 0.31.1 (#1681) * build(deps): bump k8s.io/apimachinery from 0.29.3 to 0.31.1 Bumps [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) from 0.29.3 to 0.31.1. - [Commits](https://github.com/kubernetes/apimachinery/compare/v0.29.3...v0.31.1) --- updated-dependencies: - dependency-name: k8s.io/apimachinery dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * skip go toolchain --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 36 +++++++++++----------- go.sum | 94 +++++++++++++++++++++++++++++++--------------------------- 2 files changed, 70 insertions(+), 60 deletions(-) diff --git a/go.mod b/go.mod index 0bda695d0..f378fd6ae 100644 --- a/go.mod +++ b/go.mod @@ -15,11 +15,11 @@ require ( github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.5.9 gorm.io/gorm v1.25.10 - k8s.io/apimachinery v0.29.3 + k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.29.3 ) @@ -38,15 +38,16 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.4.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -72,29 +73,30 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/x448/float16 v0.8.4 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/oauth2 v0.15.0 // indirect - golang.org/x/sync v0.5.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.29.3 // indirect - k8s.io/klog/v2 v2.110.1 // indirect - k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - 
sigs.k8s.io/yaml v1.3.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index c559d0258..d88e46338 100644 --- a/go.sum +++ b/go.sum @@ -42,17 +42,21 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= @@ -78,10 +82,10 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof 
v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -137,12 +141,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -150,8 +154,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= @@ -171,18 +175,22 @@ github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -192,8 +200,8 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -205,16 +213,16 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= 
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -226,15 +234,15 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -242,8 +250,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod 
h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -256,8 +264,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -283,19 +291,19 @@ gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod 
h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From e48c3dd7b572e3620de267360bbe7556340e027b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:05:00 +0600 Subject: [PATCH 259/262] build(deps): bump golangci/golangci-lint-action from 5 to 6 (#1707) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5 to 6. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v5...v6) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f3b9a19a9..1e246da34 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,7 @@ jobs: - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint - uses: golangci/golangci-lint-action@v5 + uses: golangci/golangci-lint-action@v6 with: version: v1.57.2 args: --verbose From 8a695eb8d1e6575eceb9760f8ef4765b6e5dc09a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:26:08 +0600 Subject: [PATCH 260/262] build(deps): bump k8s.io/client-go from 0.29.3 to 0.31.2 (#1712) Bumps [k8s.io/client-go](https://github.com/kubernetes/client-go) from 0.29.3 to 0.31.2. - [Changelog](https://github.com/kubernetes/client-go/blob/master/CHANGELOG.md) - [Commits](https://github.com/kubernetes/client-go/compare/v0.29.3...v0.31.2) --- updated-dependencies: - dependency-name: k8s.io/client-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 21 ++++++++------------- 2 files changed, 13 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index f378fd6ae..b268c3cc3 100644 --- a/go.mod +++ b/go.mod @@ -19,8 +19,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.5.9 gorm.io/gorm v1.25.10 - k8s.io/apimachinery v0.31.1 - k8s.io/client-go v0.29.3 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 ) require ( @@ -46,6 +46,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -81,18 +82,17 @@ require ( golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.15.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.3 // indirect + k8s.io/api v0.31.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect diff --git a/go.sum b/go.sum index d88e46338..9f909985c 100644 --- a/go.sum +++ b/go.sum @@ -62,7 +62,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -208,15 +207,14 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -239,7 +237,6 @@ golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= @@ -256,8 +253,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -289,12 +284,12 @@ gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= -k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 
h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= From 86c6a27cc4420db1003007ce067f7f208d61d044 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 14:08:26 +0600 Subject: [PATCH 261/262] build(deps): bump golang from 1.22.7 to 1.23.2 (#1697) Bumps golang from 1.22.7 to 1.23.2. --- updated-dependencies: - dependency-name: golang dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Dockerfile | 2 +- Dockerfile.fips.ubi | 2 +- Dockerfile.ubi | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7a272cc28..30e33e572 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.7 AS build +FROM golang:1.23.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.fips.ubi b/Dockerfile.fips.ubi index 91c86b30e..c0abae2de 100644 --- a/Dockerfile.fips.ubi +++ b/Dockerfile.fips.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.7 AS build +FROM golang:1.23.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ diff --git a/Dockerfile.ubi b/Dockerfile.ubi index 074f7b2ef..f2b0e023b 100644 --- a/Dockerfile.ubi +++ b/Dockerfile.ubi @@ -1,4 +1,4 @@ -FROM golang:1.22.7 AS build +FROM golang:1.23.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ From b4000f677bb9d18b1dad356264caeceba68133c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:37:35 +0600 Subject: [PATCH 262/262] build(deps): bump gorm.io/gorm from 1.25.10 to 1.25.12 (#1714) Bumps [gorm.io/gorm](https://github.com/go-gorm/gorm) from 1.25.10 to 1.25.12. - [Release notes](https://github.com/go-gorm/gorm/releases) - [Commits](https://github.com/go-gorm/gorm/compare/v1.25.10...v1.25.12) --- updated-dependencies: - dependency-name: gorm.io/gorm dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b268c3cc3..c2c74e679 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/driver/postgres v1.5.9 - gorm.io/gorm v1.25.10 + gorm.io/gorm v1.25.12 k8s.io/apimachinery v0.31.2 k8s.io/client-go v0.31.2 ) diff --git a/go.sum b/go.sum index 9f909985c..aaed1a6bf 100644 --- a/go.sum +++ b/go.sum @@ -282,8 +282,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= -gorm.io/gorm v1.25.10 h1:dQpO+33KalOA+aFYGlK+EfxcI5MbO7EP2yYygwh9h+s= -gorm.io/gorm v1.25.10/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= +gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw=