diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d05f631db..1e246da34 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,49 +14,54 @@ on: - "LICENSE" - "NOTICE" env: - GO_VERSION: "1.16" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" jobs: lint: name: Lint - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: yaml-lint uses: ibiqlik/action-yamllint@v3 + - name: Setup golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.57.2 + args: --verbose unit: name: Unit tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Run unit tests run: make tests - name: Upload code coverage - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v4 with: file: ./coverage.txt e2e: name: E2e tests - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Setup Kubernetes cluster (KIND) uses: engineerd/setup-kind@v0.5.0 with: @@ -78,17 +83,20 @@ jobs: expected_result: PASSED release: name: Release snapshot - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest needs: [e2e, unit] steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v6 with: - version: v0.169.0 + distribution: goreleaser + version: v1.7.0 args: release --snapshot --skip-publish --rm-dist diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index 278b6a7cc..898e7f13d 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -13,14 +13,14 @@ on: jobs: deploy: name: Deploy documentation - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Checkout main - uses: actions/checkout@v2 + uses: actions/checkout@v4 with: fetch-depth: 0 persist-credentials: true - - uses: actions/setup-python@v2 + - uses: actions/setup-python@v5 with: python-version: 3.x - run: | diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 10abab815..ce8861a79 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,54 +9,59 @@ env: ALIAS: aquasecurity DOCKERHUB_ALIAS: aquasec REP: kube-bench + jobs: publish: name: Publish - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Check Out Repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Cache Docker layers - uses: 
actions/cache@v2 + uses: actions/cache@v4 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildxarch-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildxarch- - name: Login to Docker Hub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to ECR - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: public.ecr.aws username: ${{ secrets.ECR_ACCESS_KEY_ID }} password: ${{ secrets.ECR_SECRET_ACCESS_KEY }} - name: Get version id: get_version - uses: crazy-max/ghaction-docker-meta@v3 + uses: crazy-max/ghaction-docker-meta@v5 with: images: ${{ env.REP }} tag-semver: | {{version}} - + - name: Extract variables from makefile (kubectl) + id: extract_vars + run: | + echo "KUBECTL_VERSION=$(grep -oP '^KUBECTL_VERSION\s*\?=\s*\K.*' makefile)" >> $GITHUB_ENV - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v6 with: context: . - platforms: linux/amd64,linux/arm64 + platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x builder: ${{ steps.buildx.outputs.name }} push: true build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} @@ -64,5 +69,43 @@ jobs: public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:latest cache-from: type=local,src=/tmp/.buildx-cache/release cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release + + - name: Build and push ubi image - Docker/ECR + id: docker_build_ubi + uses: docker/build-push-action@v6 + with: + context: . + platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x + builder: ${{ steps.buildx.outputs.name }} + push: true + file: Dockerfile.ubi + build-args: | + KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} + tags: | + ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi + public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi + cache-from: type=local,src=/tmp/.buildx-cache/release + cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} + + - name: Build and push fips ubi image - Docker/ECR + id: docker_build_fips_ubi + uses: docker/build-push-action@v6 + with: + context: . 
+ platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x + builder: ${{ steps.buildx.outputs.name }} + push: true + file: Dockerfile.fips.ubi + build-args: | + KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} + tags: | + ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips + public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips + cache-from: type=local,src=/tmp/.buildx-cache/release + cache-to: type=local,mode=max,dest=/tmp/.buildx-cache/release - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 77ff019a2..84ea40db7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,21 +5,23 @@ on: tags: - "v*" env: - GO_VERSION: "1.16" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" jobs: release: name: Release - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Setup Go - uses: actions/setup-go@v2 + uses: actions/setup-go@v5 with: - go-version: 1.16 + go-version: ${{ env.GO_VERSION }} - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Run unit tests run: make tests - name: Setup Kubernetes cluster (KIND) @@ -42,9 +44,10 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v2 + uses: goreleaser/goreleaser-action@v6 with: - version: v0.169.0 + distribution: goreleaser + version: v1.7.0 args: release --rm-dist env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index c63052575..36a125394 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ coverage.txt thumbs.db /kubeconfig.kube-bench /test.data +*.iml \ No newline at end of file diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 000000000..f8ba09feb --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,12 @@ +--- +linters: + disable-all: true + enable: + - deadcode + - gocyclo + - gofmt + - goimports + - govet + - misspell + - typecheck + - varcheck diff --git a/.goreleaser.yml b/.goreleaser.yml index e60c5a80c..cb15102d0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -2,20 +2,31 @@ project_name: kube-bench env: - GO111MODULE=on + - CGO_ENABLED=0 - KUBEBENCH_CFG=/etc/kube-bench/cfg builds: - main: main.go binary: kube-bench + tags: + - osusergo + - netgo + - static_build goos: - linux + - darwin goarch: - amd64 - arm - arm64 + - ppc64le + - s390x goarm: - 6 - 7 ldflags: + - "-s" + - "-w" + - "-extldflags '-static'" - "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion={{.Version}}" - "-X github.com/aquasecurity/kube-bench/cmd.cfgDir={{.Env.KUBEBENCH_CFG}}" # Archive customization diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 66c3e17c7..f4b6f1eb5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -77,3 +77,4 @@ Finally, we can use the `make kind-run` target to run the current version of kub Every time you want to test a change, you'll need to rebuild the docker image and push it to cluster before running it again. 
( `make build-docker kind-push kind-run` ) +To run the STIG tests locally, execute the following: `make build-docker kind-push kind-run-stig` diff --git a/Dockerfile b/Dockerfile index c417a4979..be257bcb0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.17.5 AS build +FROM golang:1.23.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ @@ -9,11 +9,20 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.15.0 AS run +# Add kubectl to run policies checks +ARG KUBECTL_VERSION TARGETARCH +RUN wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" +RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256" +# Verify kubectl sha256sum +RUN /bin/bash -c 'echo "$( + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.23/master.yaml b/cfg/cis-1.23/master.yaml new file mode 100644 index 000000000..db355bb5e --- /dev/null +++ b/cfg/cis-1.23/master.yaml @@ -0,0 +1,966 @@ +--- +controls: +version: "cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. 
+ scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... 
+ --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. 
+ Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. 
+ For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. 
+ --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/cis-1.23/node.yaml b/cfg/cis-1.23/node.yaml new file mode 100644 index 000000000..08d941acc --- /dev/null +++ b/cfg/cis-1.23/node.yaml @@ -0,0 +1,460 @@ +--- +controls: +version: "cis-1.23" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 644 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example:
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: true
+
+  - id: 4.2.8
+    text: "Ensure that the --hostname-override argument is not set (Manual)"
+    # This is one of those properties that can only be set as a command line argument.
+    # To check if the property is set as expected, we need to parse the kubelet command
+    # instead of reading the Kubelet Configuration file.
+    audit: "/bin/ps -fC $kubeletbin "
+    tests:
+      test_items:
+        - flag: --hostname-override
+          set: false
+    remediation: |
+      Edit the kubelet service file $kubeletsvc
+      on each worker node and remove the --hostname-override argument from the
+      KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example,
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: false
+
+  - id: 4.2.9
+    text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)"
+    audit: "/bin/ps -fC $kubeletbin"
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+        - flag: --event-qps
+          path: '{.eventRecordQPS}'
+          compare:
+            op: eq
+            value: 0
+    remediation: |
+      If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+      Based on your system, restart the kubelet service. For example,
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: false
+
+  - id: 4.2.10
+    text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+    audit: "/bin/ps -fC $kubeletbin"
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+        - flag: --tls-cert-file
+          path: '{.tlsCertFile}'
+        - flag: --tls-private-key-file
+          path: '{.tlsPrivateKeyFile}'
+    remediation: |
+      If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+      of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+      to the location of the corresponding private key file.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+      --tls-cert-file=
+      --tls-private-key-file=
+      Based on your system, restart the kubelet service. For example,
+      systemctl daemon-reload
+      systemctl restart kubelet.service
+    scored: false
+
+  - id: 4.2.11
+    text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+    audit: "/bin/ps -fC $kubeletbin"
+    audit_config: "/bin/cat $kubeletconf"
+    tests:
+      test_items:
+        - flag: --rotate-certificates
+          path: '{.rotateCertificates}'
+          compare:
+            op: eq
+            value: true
+        - flag: --rotate-certificates
+          path: '{.rotateCertificates}'
+          set: false
+      bin_op: or
+    remediation: |
+      If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+      remove it altogether to use the default value.
+      If using command line arguments, edit the kubelet service file
+      $kubeletsvc on each worker node and
+      remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+      variable.
+      Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/cis-1.23/policies.yaml b/cfg/cis-1.23/policies.yaml new file mode 100644 index 000000000..92216e4a6 --- /dev/null +++ b/cfg/cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
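+          A listing such as the following can help with the review (illustrative):
+          kubectl get clusterrolebindings -o wide | grep cluster-admin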
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers. 
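+          With Pod Security Admission, for example, the baseline profile (which disallows
+          hostNetwork) can be enforced per namespace; the namespace name is a placeholder:
+          kubectl label namespace <ns> pod-security.kubernetes.io/enforce=baseline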
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables. 
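+          An illustrative pod spec fragment (all names are placeholders):
+            volumes:
+              - name: app-secret
+                secret:
+                  secretName: my-secret
+            containers:
+              - name: app
+                volumeMounts:
+                  - name: app-secret
+                    mountPath: /etc/app-secret
+                    readOnly: true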
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/cis-1.24-microk8s/config.yaml b/cfg/cis-1.24-microk8s/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.24-microk8s/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.24-microk8s/controlplane.yaml b/cfg/cis-1.24-microk8s/controlplane.yaml new file mode 100644 index 000000000..49c4e0016 --- /dev/null +++ b/cfg/cis-1.24-microk8s/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. 
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/cfg/cis-1.24-microk8s/etcd.yaml b/cfg/cis-1.24-microk8s/etcd.yaml
new file mode 100644
index 000000000..b3ba93e37
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/etcd.yaml
@@ -0,0 +1,121 @@
+---
+controls:
+version: "cis-1.24"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Not applicable. MicroK8s uses dqlite, and communication with this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Not applicable. MicroK8s uses dqlite, and communication with this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Not applicable. MicroK8s uses dqlite, and communication with this service is done through a
+          local socket (/var/snap/microk8s/current/var/kubernetes/backend/kine.sock:12379) accessible
+          to users with root permissions.
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+        audit: "if test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt && test -e /var/snap/microk8s/current/var/kubernetes/backend/cluster.key; then echo 'certs-found'; fi"
+        tests:
+          test_items:
+            - flag: "certs-found"
+        remediation: |
+          The certificate pair for dqlite and TLS peer communication is
+          /var/snap/microk8s/current/var/kubernetes/backend/cluster.crt and
+          /var/snap/microk8s/current/var/kubernetes/backend/cluster.key.
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/cat $etcdconf | /bin/grep enable-tls || true"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-tls"
+              compare:
+                op: eq
+                value: true
+            - flag: "--enable-tls"
+              set: false
+        remediation: |
+          MicroK8s uses dqlite, and peer communication is encrypted with TLS if the --enable-tls flag
+          is set in /var/snap/microk8s/current/args/k8s-dqlite, which is true by default. 
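+          This can be verified with, for example:
+          grep -- --enable-tls /var/snap/microk8s/current/args/k8s-dqlite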
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Not applicable. MicroK8s uses dqlite, and TLS peer communication uses the certificates
+          created upon snap creation.
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+        remediation: |
+          Not applicable. MicroK8s uses dqlite, and TLS peer communication uses the certificates
+          created upon snap creation.
+        scored: false
diff --git a/cfg/cis-1.24-microk8s/master.yaml b/cfg/cis-1.24-microk8s/master.yaml
new file mode 100644
index 000000000..d7f537333
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/master.yaml
@@ -0,0 +1,948 @@
+---
+controls:
+version: "cis-1.24"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 644 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $controllermanagerconf
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/snap/microk8s/current/args/cni-network/10-calico.conflist -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + # Etcd is not running on MicroK8s master nodes + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/' + if ! 
test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/snap/microk8s/current/var/kubernetes/backend/ + scored: true + + # Etcd is not running on MicroK8s master nodes + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='/var/snap/microk8s/current/var/kubernetes/backend/' + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "root:root" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown root:root /var/snap/microk8s/current/var/kubernetes/backend/ + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c permissions=%a /var/snap/microk8s/current/credentials/client.config; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/snap/microk8s/current/credentials/client.config + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/snap/microk8s/current/credentials/client.config; then stat -c %U:%G /var/snap/microk8s/current/credentials/client.config; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/snap/microk8s/current/credentials/client.config + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/snap/microk8s/current/certs/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/snap/microk8s/current/certs/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /var/snap/microk8s/current/certs/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/snap/microk8s/current/certs/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /var/snap/microk8s/current/certs/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/snap/microk8s/current/certs/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + - flag: "--anonymous-auth" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
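+          A quick check, using the $apiserverconf variable this benchmark already defines
+          (illustrative):
+          grep -- --token-auth-file $apiserverconf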
+ scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
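+          On MicroK8s the apiserver arguments live in a plain file, so the change can be applied
+          by adding the flag there and restarting the snap (illustrative sketch; the args path is
+          assumed from the MicroK8s layout referenced elsewhere in this benchmark, and any existing
+          --authorization-mode line should be removed first):
+          echo '--authorization-mode=Node,RBAC' >> /var/snap/microk8s/current/args/kube-apiserver
+          microk8s stop && microk8s start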
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
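+          For example, the following prints nothing when the plugin has not been disabled
+          (illustrative):
+          grep -- --disable-admission-plugins $apiserverconf | grep NamespaceLifecycle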
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "cat $apiserverconf | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. 
For example, to set it as 100 MB, --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        type: manual
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed.
+          For example, --request-timeout=300s
+        scored: false
+
+      - id: 1.2.23
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --service-account-lookup=true
+          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --service-account-key-file parameter
+          to the public key file for service accounts. For example,
+          --service-account-key-file=
+        scored: true
+
+      # MicroK8s does not use etcd. The API server talks to a local dqlite instance
+      - id: 1.2.25
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: false
+
+      - id: 1.2.26
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file. 
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(cat $apiserverconf | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "cat $apiserverconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter. 
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file. 
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "cat $controllermanagerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "cat $schedulerconf | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "cat $schedulerconf | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/cis-1.24-microk8s/node.yaml b/cfg/cis-1.24-microk8s/node.yaml
new file mode 100644
index 000000000..8da12b3c0
--- /dev/null
+++ b/cfg/cis-1.24-microk8s/node.yaml
@@ -0,0 +1,458 @@
+---
+controls:
+version: "cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi' "
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi' "
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node. 
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: "/bin/sh -c 'if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi' "
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi' "
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 600 
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file. 
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi' " + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: false + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + audit: "/bin/sh -c 'if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi' " + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: "{.authentication.anonymous.enabled}" + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: "{.authorization.mode}" + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: "{.authentication.x509.clientCAFile}" + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: "{.readOnlyPort}" + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: "{.readOnlyPort}" + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: "{.streamingConnectionIdleTimeout}" + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: "{.streamingConnectionIdleTimeout}" + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: "{.protectKernelDefaults}" + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: "{.makeIPTablesUtilChains}" + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: "{.makeIPTablesUtilChains}" + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+
+ - id: 4.2.8
+ text: "Ensure that the --hostname-override argument is not set (Manual)"
+ # This is one of those properties that can only be set as a command line argument.
+ # To check if the property is set as expected, we need to parse the kubelet command
+ # instead of reading the Kubelet Configuration file.
+ audit: "cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --hostname-override
+ set: false
+ remediation: |
+ Edit the kubelet service file $kubeletsvc
+ on each worker node and remove the --hostname-override argument from the
+ KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example,
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 4.2.9
+ text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+ audit: "cat $kubeletconf"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --event-qps
+ path: "{.eventRecordQPS}"
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ Based on your system, restart the kubelet service. For example,
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 4.2.10
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+ audit: "cat $kubeletconf"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --tls-cert-file
+ path: "{.tlsCertFile}"
+ - flag: --tls-private-key-file
+ path: "{.tlsPrivateKeyFile}"
+ remediation: |
+ If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+ of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+ to the location of the corresponding private key file.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+ --tls-cert-file=
+ --tls-private-key-file=
+ Based on your system, restart the kubelet service. For example,
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+
+ - id: 4.2.11
+ text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+ audit: "cat $kubeletconf"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --rotate-certificates
+ path: "{.rotateCertificates}"
+ compare:
+ op: eq
+ value: true
+ - flag: --rotate-certificates
+ path: "{.rotateCertificates}"
+ set: false
+ bin_op: or
+ remediation: |
+ If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or
+ remove it altogether to use the default value.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+ variable.
+ Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: "{.featureGates.RotateKubeletServerCertificate}" + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: "{.featureGates.RotateKubeletServerCertificate}" + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "cat $kubeletconf" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: "{range .tlsCipherSuites[:]}{}{','}{end}" + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/cis-1.24-microk8s/policies.yaml b/cfg/cis-1.24-microk8s/policies.yaml new file mode 100644 index 000000000..3ab3eb4b0 --- /dev/null +++ b/cfg/cis-1.24-microk8s/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
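+ # Illustrative aside (a sketch, not CIS text; assumes jq is available): one way to
+ # enumerate the bindings referenced above before deleting any of them:
+ #
+ #   kubectl get clusterrolebindings -o json \
+ #     | jq -r '.items[] | select(.roleRef.name == "cluster-admin") | .metadata.name'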
+ Where possible, first bind users to a lower-privileged role and then remove the
+ clusterrolebinding to the cluster-admin role:
+ kubectl delete clusterrolebinding [name]
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize access to secrets (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove get, list and watch access to Secret objects in the cluster.
+ scored: false
+
+ - id: 5.1.3
+ text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, replace any use of wildcards in clusterroles and roles with specific
+ objects or actions.
+ scored: false
+
+ - id: 5.1.4
+ text: "Minimize access to create pods (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove create access to pod objects in the cluster.
+ scored: false
+
+ - id: 5.1.5
+ text: "Ensure that default service accounts are not actively used (Manual)"
+ type: "manual"
+ remediation: |
+ Create explicit service accounts wherever a Kubernetes workload requires specific access
+ to the Kubernetes API server.
+ Modify the configuration of each default service account to include this value
+ automountServiceAccountToken: false
+ scored: false
+
+ - id: 5.1.6
+ text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+ type: "manual"
+ remediation: |
+ Modify the definition of pods and service accounts which do not need to mount service
+ account tokens to disable it.
+ scored: false
+
+ - id: 5.1.7
+ text: "Avoid use of system:masters group (Manual)"
+ type: "manual"
+ remediation: |
+ Remove the system:masters group from all users in the cluster.
+ scored: false
+
+ - id: 5.1.8
+ text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove the impersonate, bind and escalate rights from subjects.
+ scored: false
+
+ - id: 5.2
+ text: "Pod Security Standards"
+ checks:
+ - id: 5.2.1
+ text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that either Pod Security Admission or an external policy control system is in place
+ for every namespace which contains user workloads.
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of privileged containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostPID` containers.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostIPC` containers.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostNetwork` containers.
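+ # Illustrative aside (one possible policy mechanism; `my-app` is a placeholder
+ # namespace): Pod Security Admission can enforce the 5.2.x restrictions per namespace,
+ # e.g. the `baseline` level rejects hostPID, hostIPC and hostNetwork pods:
+ #
+ #   kubectl label --overwrite ns my-app \
+ #     pod-security.kubernetes.io/enforce=baseline \
+ #     pod-security.kubernetes.io/warn=restricted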
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Automated)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 5.2.11
+ text: "Minimize the admission of Windows HostProcess containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+ scored: false
+
+ - id: 5.2.12
+ text: "Minimize the admission of HostPath volumes (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `hostPath` volumes.
+ scored: false
+
+ - id: 5.2.13
+ text: "Minimize the admission of containers which use HostPorts (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers which use `hostPort` sections.
+ scored: false
+
+ - id: 5.3
+ text: "Network Policies and CNI"
+ checks:
+ - id: 5.3.1
+ text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+ type: "manual"
+ remediation: |
+ If the CNI plugin in use does not support network policies, consideration should be given to
+ making use of a different plugin, or finding an alternate mechanism for restricting traffic
+ in the Kubernetes cluster.
+ scored: false
+
+ - id: 5.3.2
+ text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 5.4
+ text: "Secrets Management"
+ checks:
+ - id: 5.4.1
+ text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read Secrets from mounted secret files, rather than
+ from environment variables.
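+ # Illustrative aside (a minimal sketch with placeholder names): mounting a Secret as a
+ # read-only file instead of exposing it through an environment variable:
+ #
+ #   volumes:
+ #     - name: creds
+ #       secret:
+ #         secretName: db-credentials   # placeholder Secret name
+ #   containers:
+ #     - name: app
+ #       volumeMounts:
+ #         - name: creds
+ #           mountPath: /etc/creds
+ #           readOnly: true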
+ scored: false
+
+ - id: 5.4.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the Secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 5.5
+ text: "Extensible Admission Control"
+ checks:
+ - id: 5.5.1
+ text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and set up image provenance.
+ scored: false
+
+ - id: 5.7
+ text: "General Policies"
+ checks:
+ - id: 5.7.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
+ scored: false
+
+ - id: 5.7.2
+ text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+ type: "manual"
+ remediation: |
+ Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is shown below:
+ securityContext:
+ seccompProfile:
+ type: RuntimeDefault
+ scored: false
+
+ - id: 5.7.3
+ text: "Apply SecurityContext to your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+ suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ scored: false
+
+ - id: 5.7.4
+ text: "The default namespace should not be used (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ scored: false
diff --git a/cfg/cis-1.24/config.yaml b/cfg/cis-1.24/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/cis-1.24/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/cis-1.24/controlplane.yaml b/cfg/cis-1.24/controlplane.yaml
new file mode 100644
index 000000000..73e220625
--- /dev/null
+++ b/cfg/cis-1.24/controlplane.yaml
@@ -0,0 +1,46 @@
+---
+controls:
+version: "cis-1.24"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+ - id: 3.1
+ text: "Authentication and Authorization"
+ checks:
+ - id: 3.1.1
+ text: "Client certificate authentication should not be used for users (Manual)"
+ type: "manual"
+ remediation: |
+ Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+ implemented in place of client certificates.
+ scored: false
+
+ - id: 3.2
+ text: "Logging"
+ checks:
+ - id: 3.2.1
+ text: "Ensure that a minimal audit policy is created (Manual)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--audit-policy-file"
+ set: true
+ remediation: |
+ Create an audit policy file for your cluster.
+ scored: false
+
+ - id: 3.2.2
+ text: "Ensure that the audit policy covers key security concerns (Manual)"
+ type: "manual"
+ remediation: |
+ Review the audit policy provided for the cluster and ensure that it covers
+ at least the following areas:
+ - Access to Secrets managed by the cluster. Care should be taken to only
+ log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+ order to avoid risk of logging sensitive data.
+ - Modification of Pod and Deployment objects.
+ - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+ For most requests, minimally logging at the Metadata level is recommended
+ (the most basic level of logging).
+ scored: false
diff --git a/cfg/cis-1.24/etcd.yaml b/cfg/cis-1.24/etcd.yaml
new file mode 100644
index 000000000..918069eed
--- /dev/null
+++ b/cfg/cis-1.24/etcd.yaml
@@ -0,0 +1,135 @@
+---
+controls:
+version: "cis-1.24"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+ - id: 2
+ text: "Etcd Node Configuration"
+ checks:
+ - id: 2.1
+ text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--cert-file"
+ env: "ETCD_CERT_FILE"
+ - flag: "--key-file"
+ env: "ETCD_KEY_FILE"
+ remediation: |
+ Follow the etcd service documentation and configure TLS encryption.
+ Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+ on the master node and set the below parameters.
+ --cert-file=
+ --key-file=
+ scored: true
+
+ - id: 2.2
+ text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ test_items:
+ - flag: "--client-cert-auth"
+ env: "ETCD_CLIENT_CERT_AUTH"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and set the below parameter.
+ --client-cert-auth="true"
+ scored: true
+
+ - id: 2.3
+ text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--auto-tls"
+ env: "ETCD_AUTO_TLS"
+ set: false
+ - flag: "--auto-tls"
+ env: "ETCD_AUTO_TLS"
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --auto-tls parameter or set it to false.
+ --auto-tls=false
+ scored: true
+
+ - id: 2.4
+ text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+ set as appropriate (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--peer-cert-file"
+ env: "ETCD_PEER_CERT_FILE"
+ - flag: "--peer-key-file"
+ env: "ETCD_PEER_KEY_FILE"
+ remediation: |
+ Follow the etcd service documentation and configure peer TLS encryption as appropriate
+ for your etcd cluster.
+ Then, edit the etcd pod specification file $etcdconf on the
+ master node and set the below parameters.
+ --peer-cert-file=
+ --peer-key-file=
+ scored: true
+
+ - id: 2.5
+ text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+ audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+ tests:
+ test_items:
+ - flag: "--peer-client-cert-auth"
+ env: "ETCD_PEER_CLIENT_CERT_AUTH"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ Edit the etcd pod specification file $etcdconf on the master
+ node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.24/master.yaml b/cfg/cis-1.24/master.yaml new file mode 100644 index 000000000..bd11d8b0b --- /dev/null +++ b/cfg/cis-1.24/master.yaml @@ -0,0 +1,949 @@ +--- +controls: +version: "cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
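+ # Illustrative aside (hypothetical process lines): `set: false` passes only when the
+ # flag is entirely absent from the audited command line:
+ #
+ #   kube-apiserver --authorization-mode=Node,RBAC ...                      -> PASS 1.2.2
+ #   kube-apiserver --token-auth-file=/etc/kubernetes/known_tokens.csv ...  -> FAIL 1.2.2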
+ scored: true
+
+ - id: 1.2.3
+ text: "Ensure that the DenyServiceExternalIPs admission controller is not set (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--enable-admission-plugins"
+ compare:
+ op: nothave
+ value: "DenyServiceExternalIPs"
+ - flag: "--enable-admission-plugins"
+ set: false
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and remove `DenyServiceExternalIPs`
+ from the enabled admission plugins.
+ scored: true
+
+ - id: 1.2.4
+ text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--kubelet-client-certificate"
+ - flag: "--kubelet-client-key"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection between the
+ apiserver and kubelets. Then, edit the API server pod specification file
+ $apiserverconf on the control plane node and set the
+ kubelet client certificate and key parameters as below.
+ --kubelet-client-certificate=
+ --kubelet-client-key=
+ scored: true
+
+ - id: 1.2.5
+ text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--kubelet-certificate-authority"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection between
+ the apiserver and kubelets. Then, edit the API server pod specification file
+ $apiserverconf on the control plane node and set the
+ --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+ --kubelet-certificate-authority=
+ scored: true
+
+ - id: 1.2.6
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: nothave
+ value: "AlwaysAllow"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+ One such example is shown below.
+ --authorization-mode=RBAC
+ scored: true
+
+ - id: 1.2.7
+ text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: has
+ value: "Node"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+ --authorization-mode=Node,RBAC
+ scored: true
+
+ - id: 1.2.8
+ text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--authorization-mode"
+ compare:
+ op: has
+ value: "RBAC"
+ remediation: |
+ Edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
+ for example `--authorization-mode=Node,RBAC`.
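+ # Illustrative aside (a sketch of the relevant static-pod fragment; paths vary by
+ # distribution): a single argument in $apiserverconf satisfies 1.2.6-1.2.8 at once:
+ #
+ #   spec:
+ #     containers:
+ #       - command:
+ #           - kube-apiserver
+ #           - --authorization-mode=Node,RBAC   # not AlwaysAllow, has Node, has RBAC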
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
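+ # Illustrative aside (hypothetical flag values): 1.2.13/1.2.14 inspect the *disable*
+ # list, since ServiceAccount and NamespaceLifecycle are enabled by default:
+ #
+ #   (no --disable-admission-plugins flag at all)     -> PASS (set: false)
+ #   --disable-admission-plugins=PodSecurityPolicy    -> PASS (list lacks them)
+ #   --disable-admission-plugins=ServiceAccount       -> FAIL 1.2.13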
+ scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example,
+ --service-account-key-file=
+ scored: true
+
+ - id: 1.2.25
+ text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--etcd-certfile"
+ - flag: "--etcd-keyfile"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the etcd certificate and key file parameters.
+ --etcd-certfile=
+ --etcd-keyfile=
+ scored: true
+
+ - id: 1.2.26
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ bin_op: and
+ test_items:
+ - flag: "--tls-cert-file"
+ - flag: "--tls-private-key-file"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the TLS certificate and private key file parameters.
+ --tls-cert-file=
+ --tls-private-key-file=
+ scored: true
+
+ - id: 1.2.27
+ text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--client-ca-file"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the client certificate authority file.
+ --client-ca-file=
+ scored: true
+
+ - id: 1.2.28
+ text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--etcd-cafile"
+ remediation: |
+ Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the etcd certificate authority file parameter.
+ --etcd-cafile=
+ scored: true
+
+ - id: 1.2.29
+ text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+ audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+ tests:
+ test_items:
+ - flag: "--encryption-provider-config"
+ remediation: |
+ Follow the Kubernetes documentation and configure an EncryptionConfig file.
+ Then, edit the API server pod specification file $apiserverconf
+ on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+ For example, --encryption-provider-config=
+ scored: false
+
+ - id: 1.2.30
+ text: "Ensure that encryption providers are appropriately configured (Manual)"
+ audit: |
+ ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+ if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+ tests:
+ test_items:
+ - flag: "provider"
+ compare:
+ op: valid_elements
+ value: "aescbc,kms,secretbox"
+ remediation: |
+ Follow the Kubernetes documentation and configure an EncryptionConfig file.
+ In this file, choose aescbc, kms or secretbox as the encryption provider.
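+ # Illustrative aside (a minimal sketch; the base64 key below is a placeholder, not a
+ # real secret): an EncryptionConfig file that 1.2.29/1.2.30 would accept, with aescbc
+ # listed first:
+ #
+ #   apiVersion: apiserver.config.k8s.io/v1
+ #   kind: EncryptionConfiguration
+ #   resources:
+ #     - resources: ["secrets"]
+ #       providers:
+ #         - aescbc:
+ #             keys:
+ #               - name: key1
+ #                 secret: c2VjcmV0LWtleS1wbGFjZWhvbGRlcg==
+ #         - identity: {}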
+ scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/cis-1.24/node.yaml b/cfg/cis-1.24/node.yaml
new file mode 100644
index 000000000..20f75a072
--- /dev/null
+++ b/cfg/cis-1.24/node.yaml
@@ -0,0 +1,464 @@
+---
+controls:
+version: "cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file:
+          chmod 600 <filename>
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file:
+          chown root:root <filename>
+        scored: false
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 600 $kubeletconf
+        scored: false
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          `--anonymous-auth=false`
+          Based on your system, restart the kubelet service.
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
+          the location of the client CA file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service.
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: gte
+                value: 0
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service.
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=
+          --tls-private-key-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service.
+          For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
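(Taken together, the config-file variants of checks 4.2.1 through 4.2.12 can be satisfied from a single KubeletConfiguration file. A minimal sketch; the file path, the clientCAFile location and the QPS value are placeholders, not benchmark requirements.)

    # /var/lib/kubelet/config.yaml (illustrative path)
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false                             # 4.2.1
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.crt   # 4.2.3 (placeholder path)
    authorization:
      mode: Webhook                                # 4.2.2
    readOnlyPort: 0                                # 4.2.4
    streamingConnectionIdleTimeout: 5m             # 4.2.5
    protectKernelDefaults: true                    # 4.2.6
    makeIPTablesUtilChains: true                   # 4.2.7
    eventRecordQPS: 5                              # 4.2.9 (choose an appropriate level)
    rotateCertificates: true                       # 4.2.11
    featureGates:
      RotateKubeletServerCertificate: true         # 4.2.12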
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/cis-1.24/policies.yaml b/cfg/cis-1.24/policies.yaml
new file mode 100644
index 000000000..b59afc67c
--- /dev/null
+++ b/cfg/cis-1.24/policies.yaml
@@ -0,0 +1,269 @@
+---
+controls:
+version: "cis-1.24"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value:
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
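(Most of the 5.2.x remediations say "add policies to each namespace". If Pod Security Admission is the mechanism chosen for 5.2.1, that is typically done by labelling the namespace; a sketch with an assumed namespace name:)

    apiVersion: v1
    kind: Namespace
    metadata:
      name: my-app          # placeholder namespace
      labels:
        # The "restricted" profile rejects privileged containers, host namespaces,
        # privilege escalation and root containers, covering checks 5.2.2 through 5.2.8.
        pod-security.kubernetes.io/enforce: restricted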
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is as below:
+          securityContext:
+            seccompProfile:
+              type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/cis-1.5/master.yaml b/cfg/cis-1.5/master.yaml
index 926cc32ea..df0b491a1 100644
--- a/cfg/cis-1.5/master.yaml
+++ b/cfg/cis-1.5/master.yaml
@@ -158,7 +158,13 @@ groups:
 
   - id: 1.1.11
     text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a
+    audit: |
+      DATA_DIR=''
+      for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
+        if test -d "$d"; then DATA_DIR="$d"; fi
+      done
+      if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+      stat -c permissions=%a "$DATA_DIR"
     tests:
       test_items:
         - flag: "permissions"
@@ -176,7 +182,13 @@
 
   - id: 1.1.12
     text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Scored)"
-    audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G
+    audit: |
+      DATA_DIR=''
+      for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do
+        if test -d "$d"; then DATA_DIR="$d"; fi
+      done
+      if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi
+      stat -c %U:%G $DATA_DIR
     tests:
       test_items:
         - flag: "etcd:etcd"
diff --git a/cfg/cis-1.5/node.yaml b/cfg/cis-1.5/node.yaml
index dafda4587..4f219c7fb 100644
--- a/cfg/cis-1.5/node.yaml
+++ b/cfg/cis-1.5/node.yaml
@@ -25,16 +25,17 @@ groups:
     scored: true
 
   - id: 4.1.2
-    text: "Ensure that the kubelet service file ownership is set to root:root (Scored)"
-    audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+    text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+    audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
     tests:
+      bin_op: or
       test_items:
         - flag: root:root
-          set: true
+        - flag: "File not found"
     remediation: |
-      Run the below command (based on the file location on your system) on the each worker node.
-      For example,
-      chown root:root $kubeletsvc
+      Run the below command (based on the file location on your system) on the each worker node.
+      For example,
+      chown root:root $kubeletsvc
     scored: true
 
   - id: 4.1.3
@@ -48,8 +49,6 @@ groups:
           compare:
             op: bitmask
             value: "644"
-        - flag: "$proxykubeconfig"
-          set: false
     remediation: |
       Run the below command (based on the file location on your system) on the each worker node.
       For example,
@@ -64,8 +63,6 @@ groups:
       test_items:
         - flag: root:root
           set: true
-        - flag: "$proxykubeconfig"
-          set: false
     remediation: |
       Run the below command (based on the file location on your system) on the each worker node.
       For example,
       chown root:root $proxykubeconfig
@@ -106,7 +103,7 @@ groups:
   - id: 4.1.7
     text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Scored)"
     audit: |
-      CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+      CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
       if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
       if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
     tests:
@@ -124,7 +121,7 @@ groups:
   - id: 4.1.8
     text: "Ensure that the client certificate authorities file ownership is set to root:root (Scored)"
     audit: |
-      CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}')
+      CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
       if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
       if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
     tests:
@@ -475,7 +472,7 @@ groups:
             op: valid_elements
             value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
     remediation: |
-      If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+      If using a Kubelet config file, edit the file to set tlsCipherSuites: to
       TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       or to a subset of these values.
       If using executable arguments, edit the kubelet service file
diff --git a/cfg/cis-1.6-k3s/config.yaml b/cfg/cis-1.6-k3s/config.yaml
new file mode 100644
index 000000000..afc6ade0d
--- /dev/null
+++ b/cfg/cis-1.6-k3s/config.yaml
@@ -0,0 +1,42 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+
+master:
+  components:
+    - scheduler
+    - controllermanager
+    - node
+
+  scheduler:
+    kubeconfig:
+      - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
+    defaultkubeconfig: /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
+
+  controllermanager:
+    kubeconfig:
+      - /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig
+    defaultkubeconfig: /var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig
+
+etcd:
+  components:
+    - etcd
+  etcd:
+    confs:
+      - /var/lib/rancher/k3s/server/db/etcd/config
+    defaultconf: /var/lib/rancher/k3s/server/db/etcd/config
+
+node:
+  components:
+    - proxy
+    - kubelet
+  proxy:
+    kubeconfig:
+      - "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig"
+    defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig"
+  kubelet:
+    kubeconfig:
+      - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig"
+    defaultkubeconfig: "/var/lib/rancher/k3s/agent/kubelet.kubeconfig"
+    cafile:
+      - "/var/lib/rancher/k3s/server/tls/server-ca.crt"
+    defaultcafile: "/var/lib/rancher/k3s/server/tls/server-ca.crt"
diff --git a/cfg/cis-1.6-k3s/controlplane.yaml b/cfg/cis-1.6-k3s/controlplane.yaml
new file mode 100644
index 000000000..23b639cfe
--- /dev/null
+++ b/cfg/cis-1.6-k3s/controlplane.yaml
@@ -0,0 +1,40 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-policy-file"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster and pass it to k3s,
+          e.g. --kube-apiserver-arg='audit-policy-file=/path/to/audit-policy.yaml'
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Consider modification of the audit policy in use on the cluster to include these items, at a
+          minimum.
+        scored: false
diff --git a/cfg/cis-1.6-k3s/etcd.yaml b/cfg/cis-1.6-k3s/etcd.yaml
new file mode 100644
index 000000000..1cabd0a57
--- /dev/null
+++ b/cfg/cis-1.6-k3s/etcd.yaml
@@ -0,0 +1,119 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration Files"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate if etcd is used as the database (Automated)"
+        audit: grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "cert-file"
+            - flag: "key-file"
+        remediation: |
+          By default, K3s uses a config file for etcd that can be found at $etcdconf.
+          The config file contains client-transport-security:, which has fields for the client cert and client key files. No manual remediation needed.
+        scored: true
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: grep 'client-cert-auth' $etcdconf
+        tests:
+          test_items:
+            - flag: "client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, K3s uses a config file for etcd that can be found at $etcdconf.
+          client-cert-auth is set to true. No manual remediation needed.
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: grep 'auto-tls' $etcdconf | cat
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "auto-tls"
+              set: false
+            - flag: "auto-tls"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s starts etcd without this flag; it defaults to false.
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "cert-file"
+            - flag: "key-file"
+        remediation: |
+          By default, K3s starts etcd with a config file found at $etcdconf.
+          The config file contains peer-transport-security:, which has fields for the peer cert and peer key files.
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'
+        tests:
+          test_items:
+            - flag: "client-cert-auth"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, K3s uses a config file for etcd that can be found at $etcdconf.
+          The config file contains peer-transport-security:, which has client-cert-auth set to true. No manual remediation needed.
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: grep 'peer-auto-tls' $etcdconf | cat
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "peer-auto-tls"
+              set: false
+            - flag: "peer-auto-tls"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s uses a config file for etcd that can be found at $etcdconf.
+          The file does not contain the peer-auto-tls field. No manual remediation needed.
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: |
+          if [ -f "$etcdconf" ];then
+            etcd_ca=$(grep 'trusted-ca-file' $etcdconf | awk -F ":|: *" '{print $NF}');
+            apiserver_ca=$(journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "trusted-ca-file" | awk -F "=" '{print $NF}')
+            if [ "$etcd_ca" == "$apiserver_ca" ]; then
+              echo 'etcd_and_apiserver_have_same_ca';
+            else
+              echo 'etcd_and_apiserver_ca_not_same1' ;
+            fi
+          else
+            echo 'etcd_and_apiserver_ca_not_same'; return ;
+          fi
+        tests:
+          test_items:
+            - flag: "etcd_and_apiserver_ca_not_same"
+        remediation: |
+          By default, K3s uses a config file for etcd that can be found at $etcdconf
+          and the trusted-ca-file parameters in it are set to unique values specific to etcd. No manual remediation needed.
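(As checks 2.1 through 2.7 describe, K3s generates $etcdconf with both transport-security sections populated. A sketch of the relevant fragment; the certificate paths are illustrative assumptions, not guaranteed K3s defaults:)

    client-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt       # assumed path
      key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key        # assumed path
      client-cert-auth: true                                                  # check 2.2
      trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt     # check 2.7
    peer-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt  # check 2.4
      key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key   # check 2.4
      client-cert-auth: true                                                  # check 2.5
      trusted-ca-file: /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt       # assumed path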
+        scored: false
diff --git a/cfg/cis-1.6-k3s/master.yaml b/cfg/cis-1.6-k3s/master.yaml
new file mode 100644
index 000000000..e0cb3e6af
--- /dev/null
+++ b/cfg/cis-1.6-k3s/master.yaml
@@ -0,0 +1,786 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 1
+text: "Master Node Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Master Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive if etcd is used (Automated)"
+        audit: stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the below command:
+          journalctl -u k3s | grep 'Managed etcd' | grep -v grep
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/rancher/k3s/server/db/etcd
+        scored: true
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd if etcd is used (Not Applicable)"
+        scored: false
+
+      - id: 1.1.13
+        text: "Ensure that the admin.kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the k3s node.
+          For example,
+          chmod 644 /var/lib/rancher/k3s/server/cred/admin.kubeconfig
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admin.kubeconfig file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the k3s node.
+          For example,
+          chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the k3s node.
+          For example,
+          chmod 644 $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.kubeconfig file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the k3s node.
+          For example,
+          chown root:root $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the cloud-controller.kubeconfig file permissions are set to 644 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod 644 $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the $controllermanagerkubeconfig file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chown root:root $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the k3s node.
+          For example,
+          chown -R root:root /var/lib/rancher/k3s/server/tls
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Automated)"
+        audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.crt' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 644 /var/lib/rancher/k3s/server/tls/*.crt
+        scored: true
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
+        audit: "find /var/lib/rancher/k3s/server/tls/ -name '*.key' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the master node.
+          For example,
+          chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key
+        scored: true
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s kube-apiserver is configured to run with the --anonymous-auth=false flag.
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --basic-auth-file argument is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "basic-auth-file" | cat
+        tests:
+          test_items:
+            - flag: "--basic-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with basic authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "token-auth-file" | cat
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          By default, K3s does not run with token authentication enabled. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-https argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-https" | cat
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--kubelet-https"
+              compare:
+                op: eq
+                value: true
+            - flag: "--kubelet-https"
+              set: false
+        remediation: |
+          By default, K3s kube-apiserver doesn't run with the --kubelet-https parameter as it runs with TLS. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'kubelet-client-certificate|kubelet-client-key'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          By default, K3s kube-apiserver is run with these arguments for secure communication with kubelet. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "kubelet-certificate-authority"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          By default, K3s kube-apiserver is run with this argument for secure communication with kubelet. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+        remediation: |
+          By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.9
+        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+        remediation: |
+          By default, K3s sets Node,RBAC as the parameter to the --authorization-mode argument. No manual remediation is needed.
+        scored: true
+
+      - id: 1.2.10
+        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+        remediation: |
+          By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument.
+          To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then refer to K3s's documentation to see how to supply additional API server configuration via the kube-apiserver-arg parameter.
+        scored: false
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument.
+          No manual remediation needed.
+        scored: true
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+        remediation: |
+          By default, K3s only sets NodeRestriction,PodSecurityPolicy as the parameter to the --enable-admission-plugins argument.
+          To configure this, follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then refer to K3s's documentation to see how to supply additional API server configuration via the kube-apiserver-arg parameter.
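(For the two manual admission-plugin checks above, the wiring might look roughly as follows; the file paths are illustrative assumptions, and only the flag names come from the remediation text:)

    # Admission configuration file referenced by the apiserver (assumed path):
    # /var/lib/rancher/k3s/server/admission.yaml
    apiVersion: apiserver.config.k8s.io/v1
    kind: AdmissionConfiguration
    plugins:
      - name: EventRateLimit
        path: /var/lib/rancher/k3s/server/eventratelimit.yaml   # assumed path

    # Start K3s with the plugins enabled and the configuration supplied:
    k3s server \
      --kube-apiserver-arg='enable-admission-plugins=NodeRestriction,EventRateLimit,AlwaysPullImages' \
      --kube-apiserver-arg='admission-control-config-file=/var/lib/rancher/k3s/server/admission.yaml'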
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          K3s would need to have the SecurityContextDeny admission plugin enabled by passing it as an argument to K3s.
+          --kube-apiserver-arg='enable-admission-plugins=SecurityContextDeny'
+        scored: false
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "ServiceAccount"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s does not use this argument.
+          If there's a desire to use this argument, follow the documentation and create ServiceAccount objects as per your environment.
+          Then refer to K3s's documentation to see how to supply additional API server configuration via the kube-apiserver-arg parameter.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "disable-admission-plugins" | cat
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s does not use this argument. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the admission control plugin PodSecurityPolicy is set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          K3s would need to have the PodSecurityPolicy admission plugin enabled by passing it as an argument to K3s.
+          --kube-apiserver-arg='enable-admission-plugins=PodSecurityPolicy'.
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          K3s would need to have the NodeRestriction admission plugin enabled by passing it as an argument to K3s.
+          --kube-apiserver-arg='enable-admission-plugins=NodeRestriction'.
+        scored: true
+
+      - id: 1.2.18
+        text: "Ensure that the --insecure-bind-address argument is not set (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "enable-admission-plugins"
+        tests:
+          test_items:
+            - flag: "--insecure-bind-address"
+              set: false
+        remediation: |
+          By default, K3s explicitly excludes the use of the --insecure-bind-address parameter. No manual remediation is needed.
+ scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "insecure-port" + tests: + test_items: + - flag: "--insecure-port" + compare: + op: eq + value: 0 + remediation: | + By default, K3s starts the kube-apiserver process with this argument's parameter set to 0. No manual remediation is needed. + scored: true + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "secure-port" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + By default, K3s sets the parameter of 6444 for the --secure-port argument. No manual remediation is needed. + scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "profiling" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling flag parameter to false. No manual remediation needed. + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-path" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-path=/path/to/log/file' + scored: true + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxage" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxage=30' + scored: true + + - id: 1.2.24 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxbackup" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxbackup=10' + scored: true + + - id: 1.2.25 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "audit-log-maxsize" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + K3s server needs to be run with the following argument, --kube-apiserver-arg='audit-log-maxsize=100' + scored: true + + - id: 1.2.26 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "request-timeout" | cat + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + compare: + op: lte + value: 60 + remediation: | + By default, K3s does not set the --request-timeout argument. No manual remediation needed. 
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-lookup"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          K3s server needs to be run with the following argument, --kube-apiserver-arg='service-account-lookup=true'
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "service-account-key-file"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          By default, K3s sets the --service-account-key-file explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-certfile|etcd-keyfile'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          By default, K3s sets the --etcd-certfile and --etcd-keyfile arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file'
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          By default, K3s sets the --client-ca-file argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E 'etcd-cafile'
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          By default, K3s sets the --etcd-cafile argument explicitly. No manual remediation needed.
+        scored: true
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep -E "encryption-provider-config"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          K3s server needs to be run with the following argument, --kube-apiserver-arg='encryption-provider-config=/path/to/encryption_config'.
+          This can be done by running K3s with the --secrets-encryption argument, which will configure the encryption provider.
+        scored: false
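+
+      # Hedged illustration (the path below is the typical K3s default; verify
+      # it on your system): with --secrets-encryption enabled, the generated
+      # provider configuration can be inspected with, for example:
+      #   sudo grep -E 'aescbc|kms|secretbox' /var/lib/rancher/k3s/server/cred/encryption-config.json
+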
+      - id: 1.2.34
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: manual
+        remediation: |
+          K3s server needs to be run with the following, --secrets-encryption=true, and verify that one of the allowed encryption providers is present.
+          Run the below command on the master node.
+          grep aescbc /path/to/encryption-config.json
+          Verify that aescbc/kms/secretbox is set as the encryption provider for all the desired resources.
+        scored: true
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: journalctl -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "tls-cipher-suites"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        remediation: |
+          By default, K3s explicitly doesn't set this flag. No manual remediation needed.
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: |
+          journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "terminated-pod-gc-threshold"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          K3s server needs to be run with the following, --kube-controller-manager-arg='terminated-pod-gc-threshold=10'.
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "profiling"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --profiling flag parameter to false. No manual remediation needed.
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "use-service-account-credentials"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          K3s server needs to be run with the following, --kube-controller-manager-arg='use-service-account-credentials=true'
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "service-account-private-key-file"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          By default, K3s sets the --service-account-private-key-file argument with the service account key file. No manual remediation needed.
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "root-ca-file"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          By default, K3s sets the --root-ca-file argument with the root ca file. No manual remediation needed.
+        scored: true
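+
+      # Sketch for manual spot-checks (assumes a systemd-based K3s install where
+      # the service unit is named k3s): the full controller-manager argument
+      # list that the flag tests above are matched against can be printed with
+      #   journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1
+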
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "RotateKubeletServerCertificate" | cat
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          By default, K3s implements its own logic for certificate generation and rotation.
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-controller-manager" | tail -n1 | grep "bind-address"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-scheduler" | tail -n1 | grep "profiling"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --profiling flag parameter to false. No manual remediation needed.
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: journalctl -u k3s | grep "Running kube-scheduler" | tail -n1 | grep "bind-address"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1. No manual remediation needed.
+        scored: true
diff --git a/cfg/cis-1.6-k3s/node.yaml b/cfg/cis-1.6-k3s/node.yaml
new file mode 100644
index 000000000..1557b5a57
--- /dev/null
+++ b/cfg/cis-1.6-k3s/node.yaml
@@ -0,0 +1,253 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+      - id: 4.1.3
+        text: "If proxy kubeproxy.kubeconfig file exists ensure permissions are set to 644 or more restrictive (Automated)"
+        audit: stat -c permissions=%a $proxykubeconfig
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: true
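+
+      # Example remediation (illustrative; the path is the usual K3s agent
+      # location, and $proxykubeconfig is resolved from the benchmark config at
+      # runtime):
+      #   chmod 644 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+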
+      - id: 4.1.4
+        text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "$proxykubeconfig"
+              set: false
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          By default, K3s creates $kubeletkubeconfig with 644 permissions. No manual remediation needed.
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          By default, K3s creates $kubeletkubeconfig with root:root ownership. No manual remediation needed.
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)"
+        audit: stat -c permissions=%a $kubeletcafile
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          By default, K3s creates $kubeletcafile with 644 permissions.
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: stat -c %U:%G $kubeletcafile
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          By default, K3s creates $kubeletcafile with root:root ownership.
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Not Applicable)"
+        scored: false
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Not Applicable)"
+        scored: false
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the anonymous-auth argument is set to false (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "anonymous-auth"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s starts kubelet with --anonymous-auth set to false. No manual remediation needed.
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "authorization-mode"
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          K3s starts kubelet with Webhook as the value for the --authorization-mode argument. No manual remediation needed.
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "client-ca-file"
+        tests:
+          test_items:
+            - flag: --client-ca-file
+        remediation: |
+          By default, K3s starts the kubelet process with the --client-ca-file. No manual remediation needed.
+        scored: true
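+
+      # Sketch for a manual spot-check (assumes the K3s systemd unit is named
+      # k3s; on agents it may be k3s-agent): print just the kubelet flags that
+      # the tests above evaluate, e.g.
+      #   journalctl -u k3s | grep "Running kubelet" | tail -n1 | tr ' ' '\n' | grep -E 'anonymous-auth|authorization-mode|client-ca-file'
+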
+      - id: 4.2.4
+        text: "Ensure that the --read-only-port argument is set to 0 (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "read-only-port"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              set: false
+        remediation: |
+          By default, K3s starts the kubelet process with the --read-only-port argument set to 0.
+        scored: false
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "streaming-connection-idle-timeout"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              set: false
+          bin_op: or
+        remediation: |
+          By default, K3s does not set --streaming-connection-idle-timeout when starting kubelet.
+        scored: true
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "protect-kernel-defaults"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          K3s server needs to be started with the following, --protect-kernel-defaults=true.
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep "make-iptables-util-chains"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              set: false
+          bin_op: or
+        remediation: |
+          K3s server needs to be run with the following, --kubelet-arg='make-iptables-util-chains=true'.
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Not Applicable)"
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)"
+        audit: /bin/ps -fC containerd
+        tests:
+          test_items:
+            - flag: --event-qps
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: journalctl -u k3s | grep "Running kubelet" | tail -n1 | grep -E 'tls-cert-file|tls-private-key-file'
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+            - flag: --tls-private-key-file
+        remediation: |
+          By default, K3s sets the --tls-cert-file and --tls-private-key-file arguments when executing the kubelet process.
+        scored: false
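+
+      # Illustrative only (flag values are placeholders, not defaults): custom
+      # kubelet serving certificates can be supplied through K3s's --kubelet-arg
+      # pass-through, e.g.
+      #   k3s server --kubelet-arg='tls-cert-file=/path/to/cert.pem' --kubelet-arg='tls-private-key-file=/path/to/key.pem'
+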
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Not Applicable)"
+        scored: false
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Not Applicable)"
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Not Applicable)"
+        scored: false
diff --git a/cfg/cis-1.6-k3s/policies.yaml b/cfg/cis-1.6-k3s/policies.yaml
new file mode 100644
index 000000000..f6658cef3
--- /dev/null
+++ b/cfg/cis-1.6-k3s/policies.yaml
@@ -0,0 +1,260 @@
+---
+controls:
+version: "cis-1.6-k3s"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+          kubectl get roles --all-namespaces -o yaml
+          kubectl get clusterroles -o yaml
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 5.2.1
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.privileged == null) or (.spec.privileged == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the privileged value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the hostPID value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
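+
+      # Hedged example PSP fragment (the resource name is a placeholder) that
+      # keeps the host namespaces closed off, covering 5.2.2 through 5.2.4:
+      #   apiVersion: policy/v1beta1
+      #   kind: PodSecurityPolicy
+      #   metadata:
+      #     name: restricted-example
+      #   spec:
+      #     hostPID: false
+      #     hostIPC: false
+      #     hostNetwork: false
+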
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the HostIPC value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the HostNetwork value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the allowPrivilegeEscalation value to false explicitly for the PSP it creates. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select(.spec.runAsUser.rule == "MustRunAsNonRoot")' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+          An operator should apply a PodSecurityPolicy that sets the runAsUser.Rule value to MustRunAsNonRoot. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp -o json | jq .items[].spec.requiredDropCapabilities[]
+          An operator should apply a PodSecurityPolicy that sets .spec.requiredDropCapabilities[] to a value of All. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp
+          An operator should apply a PodSecurityPolicy that does not set allowedCapabilities, or sets it to an empty array. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
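+
+      # Illustrative check (assumes the jq binary is available) listing any PSP
+      # that allows added capabilities; any name printed is a policy to review:
+      #   kubectl get psp -o json | jq -r '.items[] | select(.spec.allowedCapabilities != null) | .metadata.name'
+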
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          kubectl get psp
+          An operator should apply a PodSecurityPolicy that sets requiredDropCapabilities to ALL. An example of this can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies (Manual)"
+        type: "manual"
+        remediation: |
+          By default, K3s uses Canal (Calico and Flannel) and fully supports network policies.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Run the below command on the master node.
+          for i in kube-system kube-public default; do
+            kubectl get networkpolicies -n $i;
+          done
+          Verify that there are network policies applied to each of the namespaces.
+          An operator should apply NetworkPolicies that prevent unneeded traffic from traversing networks. An example of applying a NetworkPolicy can be found in the Hardening Guide.
+          https://docs.rancher.cn/docs/k3s/security/hardening-guide/_index
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          Run the following command to find references to objects which use environment variables defined from secrets.
+          kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        audit: kubectl get namespaces
+        type: "manual"
+        remediation: |
+          Ensure that these namespaces are the ones you need and are adequately administered as per your requirements.
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Seccomp is an alpha feature currently. By default, all alpha features are disabled. So, you
+          would need to enable alpha features in the apiserver by passing the "--feature-gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions.
An + example is as below: + apiVersion: v1 + kind: Pod + metadata: + name: trustworthy-pod + annotations: + seccomp.security.alpha.kubernetes.io/pod: docker/default + spec: + containers: + - name: trustworthy-container + image: sotrustworthy:latest + scored: false + + - id: 5.7.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Run the below command on the master node. + kubectl get all -n default + The only entries there should be system-managed resources such as the kubernetes service. + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/cis-1.6/master.yaml b/cfg/cis-1.6/master.yaml index 59d5e0b0d..85ed52344 100644 --- a/cfg/cis-1.6/master.yaml +++ b/cfg/cis-1.6/master.yaml @@ -153,7 +153,13 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c permissions=%a + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" tests: test_items: - flag: "permissions" @@ -170,7 +176,13 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" - audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G $DATA_DIR tests: test_items: - flag: "etcd:etcd" diff --git a/cfg/cis-1.6/node.yaml b/cfg/cis-1.6/node.yaml index 78080d90f..f6e41f0ee 100644 --- a/cfg/cis-1.6/node.yaml +++ b/cfg/cis-1.6/node.yaml @@ -25,10 +25,12 @@ groups: - id: 4.1.2 text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" - audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' tests: + bin_op: or test_items: - flag: root:root + - flag: "File not found" remediation: | Run the below command (based on the file location on your system) on the each worker node. 
For example, @@ -98,7 +100,7 @@ groups: - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi tests: @@ -115,7 +117,7 @@ groups: - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" audit: | - CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}') + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) if test -z $CAFILE; then CAFILE=$kubeletcafile; fi if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi tests: @@ -450,7 +452,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set TLSCipherSuites: to + If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/cfg/cis-1.7/config.yaml b/cfg/cis-1.7/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.7/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.7/controlplane.yaml b/cfg/cis-1.7/controlplane.yaml new file mode 100644 index 000000000..eca90eb1d --- /dev/null +++ b/cfg/cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. 
+ scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/cis-1.7/etcd.yaml b/cfg/cis-1.7/etcd.yaml new file mode 100644 index 000000000..5fbc8ca05 --- /dev/null +++ b/cfg/cis-1.7/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. 
+          --peer-cert-file=
+          --peer-key-file=
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Manual)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=
+        scored: false
diff --git a/cfg/cis-1.7/master.yaml b/cfg/cis-1.7/master.yaml
new file mode 100644
index 000000000..283e89e17
--- /dev/null
+++ b/cfg/cis-1.7/master.yaml
@@ -0,0 +1,946 @@
+---
+controls:
+version: "cis-1.7"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 600 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+ For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+          --anonymous-auth=false
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the --token-auth-file= parameter.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "DenyServiceExternalIPs"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and add the `DenyServiceExternalIPs` plugin
+          to the enabled admission plugins, as such --enable-admission-plugins=DenyServiceExternalIPs.
+        scored: false
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the control plane node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=
+          --kubelet-client-key=
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          Follow the Kubernetes documentation and setup the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+ --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. 
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: false
+
+      - id: 1.2.17
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
+          For example,
+          --service-account-key-file=<filename>
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=<path/to/client-certificate-file>
+          --etcd-keyfile=<path/to/client-key-file>
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=<path/to/client-ca-file>
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=<path/to/ca-file>
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=</path/to/EncryptionConfig/File>
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
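+          A hedged sketch of such a file follows (it assumes the standard
+          EncryptionConfiguration v1 API; the key name and secret value are placeholders):
+          apiVersion: apiserver.config.k8s.io/v1
+          kind: EncryptionConfiguration
+          resources:
+            - resources:
+                - secrets
+              providers:
+                - aescbc:
+                    keys:
+                      - name: key1
+                        secret: <base64-encoded 32-byte key>
+                - identity: {}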
+ scored: false + + - id: 1.2.31 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=<filename>
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=<path/to/root-ca-file>
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/cis-1.7/node.yaml b/cfg/cis-1.7/node.yaml
new file mode 100644
index 000000000..29d6816b6
--- /dev/null
+++ b/cfg/cis-1.7/node.yaml
@@ -0,0 +1,455 @@
+---
+controls:
+version: "cis-1.7"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file: chmod 600 <filename>
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root <filename>
+        scored: false
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 600 $kubeletconf
+        scored: false
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          `--anonymous-auth=false`
+          Based on your system, restart the kubelet service.
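+          As a hedged illustration only (assuming the standard KubeletConfiguration
+          v1beta1 schema), the config-file form of this setting is:
+          apiVersion: kubelet.config.k8s.io/v1beta1
+          kind: KubeletConfiguration
+          authentication:
+            anonymous:
+              enabled: false
+          Either way, reload the unit files and restart the service.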
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
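+          As a hedged sketch of the config-file route (same KubeletConfiguration v1beta1
+          schema as in 4.2.1; 5m is an illustrative value, not one mandated by the benchmark):
+          streamingConnectionIdleTimeout: 5m
+          Then reload the unit files and restart the service.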
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.8
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: gte
+                value: 0
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=<path/to/tls-certificate-file>
+          --tls-private-key-file=<path/to/tls-key-file>
+          Based on your system, restart the kubelet service.
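+          In config-file form this is, as a hedged sketch (both paths are placeholders):
+          tlsCertFile: /etc/kubernetes/pki/kubelet.crt
+          tlsPrivateKeyFile: /etc/kubernetes/pki/kubelet.key
+          Then reload the unit files and restart the service.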
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.12
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service.
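+          A config-file sketch (the two suites shown are an illustrative subset):
+          tlsCipherSuites:
+            - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
+            - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+          Then reload the unit files and restart the service.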
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false diff --git a/cfg/cis-1.7/policies.yaml b/cfg/cis-1.7/policies.yaml new file mode 100644 index 000000000..82592a860 --- /dev/null +++ b/cfg/cis-1.7/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. 
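+          One hedged way to audit who holds this access (kubectl auth can-i is a standard
+          subcommand; the subject shown is a placeholder):
+          kubectl auth can-i get nodes/proxy --as=system:serviceaccount:default:default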
+ scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
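+          As an illustrative, hedged companion, Pod Security Admission can enforce such
+          restrictions per namespace (the namespace name is a placeholder):
+          kubectl label --overwrite namespace my-app pod-security.kubernetes.io/enforce=restricted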
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/cis-1.8/config.yaml b/cfg/cis-1.8/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.8/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.8/controlplane.yaml b/cfg/cis-1.8/controlplane.yaml new file mode 100644 index 000000000..378b85d63 --- /dev/null +++ b/cfg/cis-1.8/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "cis-1.8" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
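+          A minimal, hedged sketch of such a policy (audit.k8s.io/v1 is the stable API;
+          the rules shown are illustrative, not the benchmark's required set):
+          apiVersion: audit.k8s.io/v1
+          kind: Policy
+          rules:
+            - level: Metadata
+              resources:
+                - group: ""
+                  resources: ["secrets", "configmaps"]
+            - level: Metadata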
+        scored: false
diff --git a/cfg/cis-1.8/etcd.yaml b/cfg/cis-1.8/etcd.yaml
new file mode 100644
index 000000000..8e47f2987
--- /dev/null
+++ b/cfg/cis-1.8/etcd.yaml
@@ -0,0 +1,135 @@
+---
+controls:
+version: "cis-1.8"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=<path/to/cert-file>
+          --key-file=<path/to/key-file>
+        scored: true
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=<path/to/peer-cert-file>
+          --peer-key-file=<path/to/peer-key-file>
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.8/master.yaml b/cfg/cis-1.8/master.yaml new file mode 100644 index 000000000..8d639daab --- /dev/null +++ b/cfg/cis-1.8/master.yaml @@ -0,0 +1,928 @@ +--- +controls: +version: "cis-1.8" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "DenyServiceExternalIPs" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and add the `DenyServiceExternalIPs` plugin + to the enabled admission plugins, for example --enable-admission-plugins=DenyServiceExternalIPs. + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below.
+ --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit.
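+ # Illustrative sketch (not part of the benchmark text): the compare ops used in
+ # this section (`eq`, `has`, `nothave`) evaluate the flag's value taken from the
+ # audited command line. Given a hypothetical process line such as
+ #   kube-apiserver --authorization-mode=Node,RBAC --enable-admission-plugins=NodeRestriction
+ # checks 1.2.7 (op: has, value: Node) and 1.2.8 (op: has, value: RBAC) pass because
+ # the value contains those elements, while 1.2.10 (op: nothave, value: AlwaysAdmit)
+ # passes because AlwaysAdmit is absent from the enabled plugins.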
+ scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.16 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.21 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.22 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.24 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.25 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.27 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.28 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.29 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider.
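+ # Illustrative sketch (not part of the benchmark text): a minimal encryption
+ # configuration of the kind audited by 1.2.28/1.2.29, assuming the aescbc
+ # provider; the key name is arbitrary and the secret is a placeholder for a
+ # locally generated, base64-encoded 32-byte value.
+ #   apiVersion: apiserver.config.k8s.io/v1
+ #   kind: EncryptionConfiguration
+ #   resources:
+ #     - resources:
+ #         - secrets
+ #       providers:
+ #         - aescbc:
+ #             keys:
+ #               - name: key1
+ #                 secret: <base64-encoded 32-byte key>
+ #         - identity: {}
+ # The path of this file is what --encryption-provider-config should point to.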
+ scored: false + + - id: 1.2.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/cis-1.8/node.yaml b/cfg/cis-1.8/node.yaml new file mode 100644 index 000000000..ac304e6f2 --- /dev/null +++ b/cfg/cis-1.8/node.yaml @@ -0,0 +1,455 @@ +--- +controls: +version: "cis-1.8" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file. chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true`, or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the podPidsLimit configuration file setting. + scored: false diff --git a/cfg/cis-1.8/policies.yaml b/cfg/cis-1.8/policies.yaml new file mode 100644 index 000000000..c9b13c94c --- /dev/null +++ b/cfg/cis-1.8/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "cis-1.8" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects.
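+ # Illustrative sketch (not part of the benchmark text): one way to review the
+ # RBAC exposure covered by 5.1.1-5.1.10, assuming kubectl with read access to
+ # RBAC objects; the service account below is hypothetical.
+ #   kubectl get clusterrolebindings -o wide | grep cluster-admin
+ #   kubectl auth can-i get nodes --subresource=proxy --as=system:serviceaccount:default:default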
+ scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects. + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array.
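+ # Illustrative sketch (not part of the benchmark text): Pod Security Admission
+ # is one policy control mechanism that satisfies the 5.2 checks; labelling a
+ # namespace (the name below is hypothetical) enforces the restricted profile.
+ #   apiVersion: v1
+ #   kind: Namespace
+ #   metadata:
+ #     name: my-app
+ #     labels:
+ #       pod-security.kubernetes.io/enforce: restricted
+ #       pod-security.kubernetes.io/enforce-version: latest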
+ scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+ An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/cis-1.9/config.yaml b/cfg/cis-1.9/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/cis-1.9/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/cis-1.9/controlplane.yaml b/cfg/cis-1.9/controlplane.yaml new file mode 100644 index 000000000..4b58b63d4 --- /dev/null +++ b/cfg/cis-1.9/controlplane.yaml @@ -0,0 +1,62 @@ +--- +controls: +version: "cis-1.9" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
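+ # Illustrative sketch (not part of the benchmark text): a minimal audit policy
+ # of the kind 3.2.1/3.2.2 describe, logging Secrets and ConfigMaps at Metadata
+ # level only and everything else at Metadata as a catch-all; the rule set is an
+ # assumption, not a recommendation.
+ #   apiVersion: audit.k8s.io/v1
+ #   kind: Policy
+ #   rules:
+ #     - level: Metadata
+ #       resources:
+ #         - group: ""
+ #           resources: ["secrets", "configmaps"]
+ #     - level: RequestResponse
+ #       resources:
+ #         - group: ""
+ #           resources: ["pods"]
+ #     - level: Metadata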
+ scored: false diff --git a/cfg/cis-1.9/etcd.yaml b/cfg/cis-1.9/etcd.yaml new file mode 100644 index 000000000..efaa41e04 --- /dev/null +++ b/cfg/cis-1.9/etcd.yaml @@ -0,0 +1,135 @@ +--- +controls: +version: "cis-1.9" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false.
+ --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml new file mode 100644 index 000000000..51b9ab5d1 --- /dev/null +++ b/cfg/cis-1.9/master.yaml @@ -0,0 +1,919 @@ +--- +controls: +version: "cis-1.9" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
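+# Editor's note (sketch): the audit above resolves the live --data-dir from
+# the etcd process and falls back to $etcddatadir; on a compliant node it
+# prints e.g. permissions=700, as in 'stat -c permissions=%a /var/lib/etcd'
+# (/var/lib/etcd is the usual default, used here only as an illustration).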
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)" + audit: | + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. + For example, chmod 600 /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)" + audit: | + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "ownership" + compare: + op: eq + value: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. + For example, chown root:root /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. 
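+# A minimal sketch (editor's illustration): in a kubeadm-style kube-apiserver
+# manifest, 1.2.1 and 1.2.2 together correspond to a command block that sets
+# the first flag and omits the second entirely:
+#   - kube-apiserver
+#   - --anonymous-auth=false
+#   # (no --token-auth-file flag at all)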
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is set (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "DenyServiceExternalIPs"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and add the `DenyServiceExternalIPs` plugin
+          to the enabled admission plugins, for example
+          --enable-admission-plugins=DenyServiceExternalIPs.
+        scored: false
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=
+          --kubelet-client-key=
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
+          for example `--authorization-mode=Node,RBAC`.
+ scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.16 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.20 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.21 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
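+# Editor's sketch of the audit-log flags from 1.2.16-1.2.19 in one place,
+# using the benchmark's suggested values (the log path is the example given
+# in 1.2.16):
+#   - --audit-log-path=/var/log/apiserver/audit.log
+#   - --audit-log-maxage=30
+#   - --audit-log-maxbackup=10
+#   - --audit-log-maxsize=100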
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --service-account-key-file parameter
+          to the public key file for service accounts. For example,
+          --service-account-key-file=
+        scored: true
+
+      - id: 1.2.23
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+        scored: false
+
+      - id: 1.2.28
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+          on the control plane node and set the below parameter.
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
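+# A minimal sketch (editor's illustration): a kube-controller-manager command
+# satisfying 1.3.2-1.3.5 would carry flags along these lines (the pki paths
+# are kubeadm defaults, shown only as an assumption):
+#   - kube-controller-manager
+#   - --profiling=false
+#   - --use-service-account-credentials=true
+#   - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
+#   - --root-ca-file=/etc/kubernetes/pki/ca.crt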
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/cis-1.9/node.yaml b/cfg/cis-1.9/node.yaml
new file mode 100644
index 000000000..d4fa57a60
--- /dev/null
+++ b/cfg/cis-1.9/node.yaml
@@ -0,0 +1,478 @@
+---
+controls:
+version: "cis-1.9"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file: chmod 600
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: |
+          CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq)
+          if test -z $CAFILE; then CAFILE=$kubeletcafile; fi
+          if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
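+# A minimal sketch (editor's illustration): the config-file route for
+# 4.2.1-4.2.3 maps onto these KubeletConfiguration fields (the same jsonpaths
+# tested above); the CA path is the kubeadm default, an assumption here:
+# authentication:
+#   anonymous:
+#     enabled: false
+#   x509:
+#     clientCAFile: /etc/kubernetes/pki/ca.crt
+# authorization:
+#   mode: Webhook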
For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin"
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service.
For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.8
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: gte
+                value: 0
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=
+          --tls-private-key-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true`, or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false + + - id: 4.3 + text: "kube-proxy" + checks: + - id: 4.3.1 + text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)" + audit: "/bin/ps -fC $proxybin" + audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + compare: + op: has + value: "127.0.0.1" + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + set: false + remediation: | + Modify or remove any values which bind the metrics service to a non-localhost address. + The default value is 127.0.0.1:10249. 
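+# Editor's note (sketch): in a KubeProxyConfiguration this is the
+# metricsBindAddress field, e.g. 'metricsBindAddress: 127.0.0.1:10249',
+# matching the {.metricsBindAddress} path tested above.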
+ scored: true diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml new file mode 100644 index 000000000..770d2cb28 --- /dev/null +++ b/cfg/cis-1.9/policies.yaml @@ -0,0 +1,405 @@ +--- +controls: +version: "cis-1.9" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + audit: | + kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject + do + if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then + is_compliant="false" + else + is_compliant="true" + fi; + echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : kubectl delete clusterrolebinding [name] + Condition: is_compliant is false if rolename is not cluster-admin and rolebinding is cluster-admin. + scored: true + + - id: 5.1.2 + text: "Minimize access to secrets (Automated)" + audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\"" + tests: + test_items: + - flag: "canGetListWatchSecretsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: true + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + audit: | + # Check Roles + kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name + do + role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules') + if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then + role_is_compliant="false" + else + role_is_compliant="true" + fi; + echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}" + done + + # Check ClusterRoles + kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name + do + clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules') + if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then + clusterrole_is_compliant="false" + else + clusterrole_is_compliant="true" + fi; + echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}" + done + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "role_is_compliant" + compare: + op: eq + value: true + set: true + - flag: "clusterrole_is_compliant" + compare: + op: eq + value: true + set: true + remediation: | + Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific + objects or actions. + Condition: role_is_compliant is false if ["*"] is found in rules. 
+          Condition: clusterrole_is_compliant is false if ["*"] is found in rules.
+        scored: true
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Automated)"
+        audit: |
+          echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)"
+        tests:
+          test_items:
+            - flag: "canCreatePodsAsSystemAuthenticated"
+              compare:
+                op: eq
+                value: no
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: true
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used (Automated)"
+        audit: |
+          kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "automountServiceAccountToken"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          `automountServiceAccountToken: false`.
+        scored: true
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)"
+        audit: |
+          kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken
+          do
+            # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>.
+            svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+            pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g')
+            if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then
+              is_compliant="true"
+            elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then
+              is_compliant="true"
+            else
+              is_compliant="false"
+            fi
+            echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}"
+          done
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "is_compliant"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Modify the definition of ServiceAccounts and Pods which do not need to mount service
+          account tokens to disable it, with `automountServiceAccountToken: false`.
+          If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
+          Condition: Pod is_compliant is true when
+          - ServiceAccount has automountServiceAccountToken: false and the Pod has automountServiceAccountToken: false or notset
+          - ServiceAccount has automountServiceAccountToken: true and the Pod has automountServiceAccountToken: false
+        scored: true
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+      - id: 5.1.9
+        text: "Minimize access to create persistent volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to PersistentVolume objects in the cluster.
+        scored: false
+
+      - id: 5.1.10
+        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the proxy sub-resource of node objects.
+        scored: false
+
+      - id: 5.1.11
+        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
+        scored: false
+
+      - id: 5.1.12
+        text: "Minimize access to webhook configuration objects (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
+        scored: false
+
+      - id: 5.1.13
+        text: "Minimize access to the service account token creation (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the token sub-resource of serviceaccount objects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
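+# A minimal sketch (editor's illustration): Pod Security Admission is one
+# policy mechanism for 5.2.2-5.2.5; its baseline profile rejects privileged,
+# hostPID, hostIPC and hostNetwork pods in a namespace labelled like this
+# ('my-app' is a hypothetical namespace name):
+# apiVersion: v1
+# kind: Namespace
+# metadata:
+#   name: my-app
+#   labels:
+#     pod-security.kubernetes.io/enforce: baseline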
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Manual)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
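A sketch of the file-based pattern 5.4.1 prefers (the Secret and Pod names are illustrative):

    apiVersion: v1
    kind: Pod
    metadata:
      name: app
    spec:
      containers:
        - name: app
          image: registry.example.com/app:1.0
          volumeMounts:
            - name: db-creds
              mountPath: /etc/db-creds
              readOnly: true
      volumes:
        - name: db-creds
          secret:
            secretName: db-creds

The application then reads the individual keys as files under /etc/db-creds/ instead of from environment variables, which keeps the secret out of the process environment that child processes and diagnostic dumps can expose.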
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is given below:
+            securityContext:
+              seccompProfile:
+                type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+ scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index 6f7cf0fa8..d5d170bbd 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -36,6 +36,7 @@ master: - /var/snap/microk8s/current/args/kube-apiserver - /etc/origin/master/master-config.yaml - /etc/kubernetes/manifests/talos-kube-apiserver.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml defaultconf: /etc/kubernetes/manifests/kube-apiserver.yaml scheduler: @@ -53,6 +54,7 @@ master: - /var/snap/microk8s/current/args/kube-scheduler - /etc/origin/master/scheduler.json - /etc/kubernetes/manifests/talos-kube-scheduler.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-scheduler.yaml defaultconf: /etc/kubernetes/manifests/kube-scheduler.yaml kubeconfig: - /etc/kubernetes/scheduler.conf @@ -77,6 +79,7 @@ master: - /var/snap/kube-controller-manager/current/args - /var/snap/microk8s/current/args/kube-controller-manager - /etc/kubernetes/manifests/talos-kube-controller-manager.yaml + - /var/lib/rancher/rke2/agent/pod-manifests/kube-controller-manager.yaml defaultconf: /etc/kubernetes/manifests/kube-controller-manager.yaml kubeconfig: - /etc/kubernetes/controller-manager.conf @@ -89,6 +92,10 @@ master: bins: - "etcd" - "openshift start etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd + - /var/lib/rancher/k3s/server/db/etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml @@ -98,7 +105,10 @@ master: - /var/snap/etcd/common/etcd.conf.yaml - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service + - /var/lib/rancher/rke2/server/db/etcd/config + - /var/lib/rancher/k3s/server/db/etcd/config defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd flanneld: optional: true @@ -128,6 +138,9 @@ node: - "/etc/kubernetes/certs/ca.crt" - "/etc/kubernetes/cert/ca.pem" - "/var/snap/microk8s/current/certs/ca.crt" + - "/var/lib/rancher/rke2/agent/server.crt" + - "/var/lib/rancher/rke2/agent/client-ca.crt" + - "/var/lib/rancher/k3s/agent/client-ca.crt" svc: # These paths must also be included # in the 'confs' property below @@ -147,13 +160,17 @@ node: - "/var/lib/kubelet/kubeconfig" - "/etc/kubernetes/kubelet-kubeconfig" - "/etc/kubernetes/kubelet/kubeconfig" + - "/etc/kubernetes/ssl/kubecfg-kube-node.yaml" - "/var/snap/microk8s/current/credentials/kubelet.config" - "/etc/kubernetes/kubeconfig-kubelet" + - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" + - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" confs: - "/etc/kubernetes/kubelet-config.yaml" - "/var/lib/kubelet/config.yaml" - "/var/lib/kubelet/config.yml" - "/etc/kubernetes/kubelet/kubelet-config.json" + - "/etc/kubernetes/kubelet/config.json" - "/etc/kubernetes/kubelet/config" - "/home/kubernetes/kubelet-config.yaml" - "/home/kubernetes/kubelet-config.yml" @@ -173,6 +190,7 @@ node: - "/etc/systemd/system/snap.kubelet.daemon.service" - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" - "/etc/kubernetes/kubelet.yaml" + defaultconf: "/var/lib/kubelet/config.yaml" defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" defaultkubeconfig: "/etc/kubernetes/kubelet.conf" @@ -196,8 +214,11 @@ node: - "/etc/kubernetes/kubelet-kubeconfig" - "/etc/kubernetes/kubelet-kubeconfig.conf" - "/etc/kubernetes/kubelet/config" + - "/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml" - "/var/lib/kubelet/kubeconfig" - "/var/snap/microk8s/current/credentials/proxy.config" + - "/var/lib/rancher/rke2/agent/kubeproxy.kubeconfig" + - 
"/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig" svc: - "/lib/systemd/system/kube-proxy.service" - "/etc/systemd/system/snap.microk8s.daemon-proxy.service" @@ -211,6 +232,10 @@ etcd: etcd: bins: - "etcd" + datadirs: + - /var/lib/etcd/default.etcd + - /var/lib/etcd/data.etcd + - /var/lib/rancher/k3s/server/db/etcd confs: - /etc/kubernetes/manifests/etcd.yaml - /etc/kubernetes/manifests/etcd.yml @@ -220,7 +245,10 @@ etcd: - /var/snap/etcd/common/etcd.conf.yaml - /var/snap/microk8s/current/args/etcd - /usr/lib/systemd/system/etcd.service + - /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml + - /var/lib/rancher/k3s/server/db/etcd/config defaultconf: /etc/kubernetes/manifests/etcd.yaml + defaultdatadir: /var/lib/etcd/default.etcd controlplane: components: @@ -246,14 +274,38 @@ version_mapping: "1.18": "cis-1.6" "1.19": "cis-1.20" "1.20": "cis-1.20" + "1.21": "cis-1.20" + "1.22": "cis-1.23" + "1.23": "cis-1.23" + "1.24": "cis-1.24" + "1.25": "cis-1.7" + "1.26": "cis-1.8" + "1.27": "cis-1.9" + "1.28": "cis-1.9" + "1.29": "cis-1.9" "eks-1.0.1": "eks-1.0.1" + "eks-1.1.0": "eks-1.1.0" + "eks-1.2.0": "eks-1.2.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" + "gke-1.6.0": "gke-1.6.0" "ocp-3.10": "rh-0.7" "ocp-3.11": "rh-0.7" "ocp-4.0": "rh-1.0" "aks-1.0": "aks-1.0" "ack-1.0": "ack-1.0" + "cis-1.6-k3s": "cis-1.6-k3s" + "cis-1.24-microk8s": "cis-1.24-microk8s" + "tkgi-1.2.53": "tkgi-1.2.53" + "k3s-cis-1.7": "k3s-cis-1.7" + "k3s-cis-1.23": "k3s-cis-1.23" + "k3s-cis-1.24": "k3s-cis-1.24" + "rke-cis-1.7": "rke-cis-1.7" + "rke-cis-1.23": "rke-cis-1.23" + "rke-cis-1.24": "rke-cis-1.24" + "rke2-cis-1.7": "rke2-cis-1.7" + "rke2-cis-1.23": "rke2-cis-1.23" + "rke2-cis-1.24": "rke2-cis-1.24" target_mapping: "cis-1.5": @@ -268,12 +320,54 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.6-k3s": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "cis-1.20": - "master" - "node" - "controlplane" - "etcd" - "policies" + "cis-1.23": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.24": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.24-microk8s": + - "master" + - "etcd" + - "node" + - "controlplane" + - "policies" + "cis-1.7": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.8": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" + "cis-1.9": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" @@ -287,12 +381,30 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-1.6.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "eks-1.0.1": - "master" - "node" - "controlplane" - "policies" - "managedservices" + "eks-1.1.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" + "eks-1.2.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "rh-0.7": - "master" - "node" @@ -315,3 +427,74 @@ target_mapping: - "controlplane" - "policies" - "etcd" + "eks-stig-kubernetes-v1r6": + - "node" + - "controlplane" + - "policies" + - "managedservices" + "tkgi-1.2.53": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.8": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "k3s-cis-1.24": + - "master" + - "etcd" + - 
"controlplane" + - "node" + - "policies" + "rke-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke-cis-1.24": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.7": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.23": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" + "rke2-cis-1.24": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" diff --git a/cfg/eks-1.0.1/policies.yaml b/cfg/eks-1.0.1/policies.yaml index 3b9aa8b18..cf41ab984 100644 --- a/cfg/eks-1.0.1/policies.yaml +++ b/cfg/eks-1.0.1/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 4.1.4 text: "Minimize access to create pods (Manual)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false diff --git a/cfg/eks-1.1.0/config.yaml b/cfg/eks-1.1.0/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-1.1.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.1.0/controlplane.yaml b/cfg/eks-1.1.0/controlplane.yaml new file mode 100644 index 000000000..b432a668d --- /dev/null +++ b/cfg/eks-1.1.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.1.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Manual)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." + scored: false diff --git a/cfg/eks-1.1.0/managedservices.yaml b/cfg/eks-1.1.0/managedservices.yaml new file mode 100644 index 000000000..50575d82e --- /dev/null +++ b/cfg/eks-1.1.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.1.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. + Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. + On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. 
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize user access to Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          Before you use IAM to manage access to Amazon ECR, you should understand what IAM features
+          are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other
+          AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize cluster access to read-only for Amazon ECR (Manual)"
+        type: "manual"
+        remediation: |
+          You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites.
+
+          The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess
+          the following IAM policy permissions for Amazon ECR.
+
+          {
+              "Version": "2012-10-17",
+              "Statement": [
+                  {
+                      "Effect": "Allow",
+                      "Action": [
+                          "ecr:BatchCheckLayerAvailability",
+                          "ecr:BatchGetImage",
+                          "ecr:GetDownloadUrlForLayer",
+                          "ecr:GetAuthorizationToken"
+                      ],
+                      "Resource": "*"
+                  }
+              ]
+          }
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize Container Registries to only those approved (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.2
+    text: "Identity and Access Management (IAM)"
+    checks:
+      - id: 5.2.1
+        text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+  - id: 5.3
+    text: "AWS Key Management Service (KMS)"
+    checks:
+      - id: 5.3.1
+        text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)"
+        type: "manual"
+        remediation: |
+          This process can only be performed during Cluster Creation.
+
+          Enable 'Secrets Encryption' during Amazon EKS cluster creation as described
+          in the links within the 'References' section.
+        scored: false
+
+  - id: 5.4
+    text: "Cluster Networking"
+    checks:
+      - id: 5.4.1
+        text: "Restrict Access to the Control Plane Endpoint (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.3
+        text: "Ensure clusters are created with Private Nodes (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.4
+        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.5
+        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+
+  - id: 5.5
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.5.1
+        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
+        scored: false
+
+
+  - id: 5.6
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.6.1
+        text: "Consider Fargate for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+          in your cluster, you must define a Fargate profile that specifies which pods should use
+          Fargate when they are launched. For more information, see AWS Fargate profile.
+
+          Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+          already been created for your cluster with selectors for all pods in the kube-system
+          and default namespaces. Use the following procedure to create Fargate profiles for
+          any other namespaces you would like to use with Fargate.
+        scored: false
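A sketch of what such a Fargate profile can look like in an eksctl ClusterConfig (the cluster, region and namespace names are illustrative, and the schema should be verified against the current eksctl documentation):

    apiVersion: eksctl.io/v1alpha5
    kind: ClusterConfig
    metadata:
      name: my-cluster
      region: us-east-1
    fargateProfiles:
      - name: untrusted-workloads
        selectors:
          - namespace: untrusted

    # applied with:  eksctl create fargateprofile -f cluster.yaml
    # or directly:   eksctl create fargateprofile --cluster my-cluster --name untrusted-workloads --namespace untrusted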
diff --git a/cfg/eks-1.1.0/master.yaml b/cfg/eks-1.1.0/master.yaml
new file mode 100644
index 000000000..51de3602b
--- /dev/null
+++ b/cfg/eks-1.1.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/eks-1.1.0/node.yaml b/cfg/eks-1.1.0/node.yaml
new file mode 100644
index 000000000..003be5892
--- /dev/null
+++ b/cfg/eks-1.1.0/node.yaml
@@ -0,0 +1,330 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.3
+        text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 644 $kubeletconf
+        scored: false
+
+      - id: 3.1.4
+        text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+        scored: false
+
+  - id: 3.2
+    text: "Kubelet"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              set: true
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to
+          false.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
+          For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) "
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 3.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.9
+        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
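Taken together, checks 3.2.1 through 3.2.9 describe a kubelet config file roughly like the following sketch (YAML form of the upstream KubeletConfiguration type; the CA path is illustrative, and the certificate-rotation checks that follow add further settings on top):

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    authentication:
      anonymous:
        enabled: false                             # 3.2.1
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.crt   # 3.2.3
    authorization:
      mode: Webhook                                # 3.2.2
    readOnlyPort: 0                                # 3.2.4
    streamingConnectionIdleTimeout: 5m             # 3.2.5 (any non-zero timeout)
    protectKernelDefaults: true                    # 3.2.6
    makeIPTablesUtilChains: true                   # 3.2.7
    eventRecordQPS: 0                              # 3.2.9 (0 removes the rate limit, so events are not dropped)

Check 3.2.8 (--hostname-override) has no config-file equivalent and is verified on the kubelet command line only.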
+
+      - id: 3.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.11
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+  - id: 3.3
+    text: "Container Optimized OS"
+    checks:
+      - id: 3.3.1
+        text: "Prefer using Container-Optimized OS when possible (Manual)"
+        remediation: "No remediation"
+        scored: false
diff --git a/cfg/eks-1.1.0/policies.yaml b/cfg/eks-1.1.0/policies.yaml
new file mode 100644
index 000000000..e4d13a1c4
--- /dev/null
+++ b/cfg/eks-1.1.0/policies.yaml
@@ -0,0 +1,205 @@
+---
+controls:
+version: "eks-1.1.0"
+id: 4
+text: "Policies"
+type: "policies"
+groups:
+  - id: 4.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role :
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 4.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+        scored: false
+
+      - id: 4.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 4.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
(Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. + scored: false + + - id: 4.2.3 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostIPC field is omitted or set to false. + scored: false + + - id: 4.2.4 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostNetwork field is omitted or set to false. + scored: false + + - id: 4.2.5 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.allowPrivilegeEscalation field is omitted or set to false. + scored: false + + - id: 4.2.6 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of + UIDs not including 0. + scored: false + + - id: 4.2.7 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.requiredDropCapabilities is set to include either NET_RAW or ALL. + scored: false + + - id: 4.2.8 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that allowedCapabilities is not present in PSPs for the cluster unless + it is set to an empty array. + scored: false + + - id: 4.2.9 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabities to operate consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. 
+ scored: false + + - id: 4.3 + text: "CNI Plugin" + checks: + - id: 4.3.1 + text: "Ensure that the latest CNI version is used (Manual)" + type: "manual" + remediation: | + Review the documentation of AWS CNI plugin, and ensure latest CNI version is used. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: [] + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 4.6.3 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/eks-1.2.0/config.yaml b/cfg/eks-1.2.0/config.yaml new file mode 100644 index 000000000..17301a751 --- /dev/null +++ b/cfg/eks-1.2.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml +## These settings are required if you are using the --asff option to report findings to AWS Security Hub +## AWS account number is required. +AWS_ACCOUNT: "" +## AWS region is required. +AWS_REGION: "" +## EKS Cluster ARN is required. +CLUSTER_ARN: "" diff --git a/cfg/eks-1.2.0/controlplane.yaml b/cfg/eks-1.2.0/controlplane.yaml new file mode 100644 index 000000000..d7e3444b2 --- /dev/null +++ b/cfg/eks-1.2.0/controlplane.yaml @@ -0,0 +1,14 @@ +--- +controls: +version: "eks-1.2.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Logging" + checks: + - id: 2.1.1 + text: "Enable audit logs (Manual)" + remediation: "Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler." 
+ scored: false diff --git a/cfg/eks-1.2.0/managedservices.yaml b/cfg/eks-1.2.0/managedservices.yaml new file mode 100644 index 000000000..c9ae5ff3f --- /dev/null +++ b/cfg/eks-1.2.0/managedservices.yaml @@ -0,0 +1,154 @@ +--- +controls: +version: "eks-1.2.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning using Amazon ECR image scanning or a third-party provider (Manual)" + type: "manual" + remediation: | + To utilize AWS ECR for Image scanning please follow the steps below: + + To create a repository configured for scan on push (AWS CLI): + aws ecr create-repository --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + To edit the settings of an existing repository (AWS CLI): + aws ecr put-image-scanning-configuration --repository-name $REPO_NAME --image-scanning-configuration scanOnPush=true --region $REGION_CODE + + Use the following steps to start a manual image scan using the AWS Management Console. + Open the Amazon ECR console at https://console.aws.amazon.com/ecr/repositories. + From the navigation bar, choose the Region to create your repository in. + In the navigation pane, choose Repositories. + On the Repositories page, choose the repository that contains the image to scan. + On the Images page, select the image to scan and then choose Scan. + scored: false + + - id: 5.1.2 + text: "Minimize user access to Amazon ECR (Manual)" + type: "manual" + remediation: | + Before you use IAM to manage access to Amazon ECR, you should understand what IAM features + are available to use with Amazon ECR. To get a high-level view of how Amazon ECR and other + AWS services work with IAM, see AWS Services That Work with IAM in the IAM User Guide. + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Amazon ECR (Manual)" + type: "manual" + remediation: | + You can use your Amazon ECR images with Amazon EKS, but you need to satisfy the following prerequisites. + + The Amazon EKS worker node IAM role (NodeInstanceRole) that you use with your worker nodes must possess + the following IAM policy permissions for Amazon ECR. + + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer", + "ecr:GetAuthorizationToken" + ], + "Resource": "*" + } + ] + } + scored: false + + - id: 5.1.4 + text: "Minimize Container Registries to only those approved (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Prefer using dedicated Amazon EKS Service Accounts (Manual)" + type: "manual" + remediation: "No remediation" + scored: false + + - id: 5.3 + text: "AWS Key Management Service (KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using Customer Master Keys (CMKs) managed in AWS KMS (Manual)" + type: "manual" + remediation: | + This process can only be performed during Cluster Creation. + + Enable 'Secrets Encryption' during Amazon EKS cluster creation as described + in the links within the 'References' section. 
+        scored: false
+
+  - id: 5.4
+    text: "Cluster Networking"
+    checks:
+      - id: 5.4.1
+        text: "Restrict Access to the Control Plane Endpoint (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.2
+        text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.3
+        text: "Ensure clusters are created with Private Nodes (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.4
+        text: "Ensure Network Policy is Enabled and set as appropriate (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+      - id: 5.4.5
+        text: "Encrypt traffic to HTTPS load balancers with TLS certificates (Manual)"
+        type: "manual"
+        remediation: "No remediation"
+        scored: false
+
+
+  - id: 5.5
+    text: "Authentication and Authorization"
+    checks:
+      - id: 5.5.1
+        text: "Manage Kubernetes RBAC users with AWS IAM Authenticator for Kubernetes (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the 'Managing users or IAM roles for your cluster' in Amazon EKS documentation.
+        scored: false
+
+
+  - id: 5.6
+    text: "Other Cluster Configurations"
+    checks:
+      - id: 5.6.1
+        text: "Consider Fargate for running untrusted workloads (Manual)"
+        type: "manual"
+        remediation: |
+          Create a Fargate profile for your cluster. Before you can schedule pods running on Fargate
+          in your cluster, you must define a Fargate profile that specifies which pods should use
+          Fargate when they are launched. For more information, see AWS Fargate profile.
+
+          Note: If you created your cluster with eksctl using the --fargate option, then a Fargate profile has
+          already been created for your cluster with selectors for all pods in the kube-system
+          and default namespaces. Use the following procedure to create Fargate profiles for
+          any other namespaces you would like to use with Fargate.
+        scored: false
diff --git a/cfg/eks-1.2.0/master.yaml b/cfg/eks-1.2.0/master.yaml
new file mode 100644
index 000000000..8da0179b3
--- /dev/null
+++ b/cfg/eks-1.2.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/eks-1.2.0/node.yaml b/cfg/eks-1.2.0/node.yaml
new file mode 100644
index 000000000..54a3a9625
--- /dev/null
+++ b/cfg/eks-1.2.0/node.yaml
@@ -0,0 +1,330 @@
+---
+controls:
+version: "eks-1.2.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 3.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 3.1.1
+        text: "Ensure that the kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: false
+
+      - id: 3.1.2
+        text: "Ensure that the kubelet kubeconfig file ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+ For example, + chown root:root $kubeletkubeconfig + scored: false + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: false + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: false + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + set: true + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + set: true + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.3 + text: "Ensure that a Client CA File is Configured (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.4 + text: "Ensure that the --read-only-port is disabled (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: true + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: true + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated) " + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: true + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. 
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.9
+        text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: true
+              compare:
+                op: gte
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.10
+        text: "Ensure that the --rotate-certificates argument is not present or is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: true
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 3.2.11
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: true
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + - id: 3.3 + text: "Container Optimized OS" + checks: + - id: 3.3.1 + text: "Prefer using a container-optimized OS when possible (Manual)" + remediation: "No remediation" + scored: false diff --git a/cfg/eks-1.2.0/policies.yaml b/cfg/eks-1.2.0/policies.yaml new file mode 100644 index 000000000..7f5db95d9 --- /dev/null +++ b/cfg/eks-1.2.0/policies.yaml @@ -0,0 +1,213 @@ +--- +controls: +version: "eks-1.2.0" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 4.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 4.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.2 + text: "Pod Security Policies" + checks: + - id: 4.2.1 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that + the .spec.privileged field is omitted or set to false. + scored: false + + - id: 4.2.2 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Create a PSP as described in the Kubernetes documentation, ensuring that the + .spec.hostPID field is omitted or set to false. 
+ scored: false
+
+ - id: 4.2.3
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostIPC field is omitted or set to false.
+ scored: false
+
+ - id: 4.2.4
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.hostNetwork field is omitted or set to false.
+ scored: false
+
+ - id: 4.2.5
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.allowPrivilegeEscalation field is omitted or set to false.
+ scored: false
+
+ - id: 4.2.6
+ text: "Minimize the admission of root containers (Automated)"
+ type: "manual"
+ remediation: |
+ Create a PSP as described in the Kubernetes documentation, ensuring that the
+ .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+ UIDs not including 0.
+ scored: false
+
+ - id: 4.2.7
+ text: "Minimize the admission of containers with added capabilities (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 4.2.8
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
+ scored: false
+
+ - id: 4.3
+ text: "CNI Plugin"
+ checks:
+ - id: 4.3.1
+ text: "Ensure CNI plugin supports network policies (Manual)"
+ type: "manual"
+ remediation: |
+ As with RBAC policies, network policies should adhere to the policy of least privileged
+ access. Start by creating a deny all policy that restricts all inbound and outbound traffic
+ from a namespace or create a global policy using Calico.
+ scored: false
+
+ - id: 4.3.2
+ text: "Ensure that all Namespaces have Network Policies defined (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create NetworkPolicy objects as you need them.
+ scored: false
+
+ - id: 4.4
+ text: "Secrets Management"
+ checks:
+ - id: 4.4.1
+ text: "Prefer using secrets as files over secrets as environment variables (Manual)"
+ type: "manual"
+ remediation: |
+ If possible, rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
+ scored: false
+
+ - id: 4.4.2
+ text: "Consider external secret storage (Manual)"
+ type: "manual"
+ remediation: |
+ Refer to the secrets management options offered by your cloud provider or a third-party
+ secrets management solution.
+ scored: false
+
+ - id: 4.5
+ text: "Extensible Admission Control"
+ checks: []
+
+ - id: 4.6
+ text: "General Policies"
+ checks:
+ - id: 4.6.1
+ text: "Create administrative boundaries between resources using namespaces (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the documentation and create namespaces for objects in your deployment as you need
+ them.
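+ # Illustrative only (names are placeholders): namespaces are created and listed with
+ #   kubectl create namespace <team-namespace>
+ #   kubectl get namespaces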
+ scored: false
+
+ - id: 4.6.2
+ text: "Apply Security Context to Your Pods and Containers (Manual)"
+ type: "manual"
+ remediation: |
+ Follow the Kubernetes documentation and apply security contexts to your pods. For a
+ suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+ Containers.
+ scored: false
+
+ - id: 4.6.3
+ text: "The default namespace should not be used (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+ resources and that all new resources are created in a specific namespace.
+ scored: false
diff --git a/cfg/eks-stig-kubernetes-v1r6/config.yaml b/cfg/eks-stig-kubernetes-v1r6/config.yaml
new file mode 100644
index 000000000..17301a751
--- /dev/null
+++ b/cfg/eks-stig-kubernetes-v1r6/config.yaml
@@ -0,0 +1,9 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+## These settings are required if you are using the --asff option to report findings to AWS Security Hub
+## AWS account number is required.
+AWS_ACCOUNT: ""
+## AWS region is required.
+AWS_REGION: ""
+## EKS Cluster ARN is required.
+CLUSTER_ARN: ""
diff --git a/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml b/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml
new file mode 100644
index 000000000..cc3815974
--- /dev/null
+++ b/cfg/eks-stig-kubernetes-v1r6/controlplane.yaml
@@ -0,0 +1,124 @@
+---
+controls:
+version: "eks-stig-kubernetes-v1r6"
+id: 2
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+ - id: 2.1
+ text: "DISA Category Code I"
+ checks:
+ - id: V-242390
+ text: "The Kubernetes API server must have anonymous authentication disabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ set: true
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to
+ false.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --anonymous-auth=false
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242400
+ text: "The Kubernetes API server must have Alpha APIs disabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--feature-gates"
+ compare:
+ op: nothave
+ value: "AllAlpha=true"
+ set: true
+ - flag: "--feature-gates"
+ set: false
+ remediation: |
+ Edit any manifest files or $kubeletconf that contain the feature-gates
+ setting with AllAlpha set to "true".
+ Set the flag to "false" or remove the "AllAlpha" setting
+ completely. Restart the kubelet service if the kubelet config file
+ is changed.
+ scored: true
+ - id: 2.2
+ text: "DISA Category Code II"
+ checks:
+ - id: V-242381
+ text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)"
+ type: "manual"
+ remediation: |
+ Create explicit service accounts wherever a Kubernetes workload requires specific access
+ to the Kubernetes API server.
+ Modify the configuration of each default service account to include this value
+ automountServiceAccountToken: false
+ scored: false
+ - id: V-242402
+ text: "The Kubernetes API Server must have an audit log path set (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242403
+ text: "Kubernetes API Server must generate audit records (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242461
+ text: "Kubernetes API Server audit logs must be enabled. (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242462
+ text: "The Kubernetes API Server must be set to audit log max size. (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242463
+ text: "The Kubernetes API Server must be set to audit log maximum backup. (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242464
+ text: "The Kubernetes API Server audit log retention must be set. (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242465
+ text: "The Kubernetes API Server audit log path must be set. (Manual)"
+ type: "manual"
+ remediation: |
+ Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+ scored: false
+ - id: V-242443
+ text: "Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)"
+ type: "manual"
+ remediation: |
+ Upgrade Kubernetes to a supported version.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html
diff --git a/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml b/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml
new file mode 100644
index 000000000..23c4eaa9f
--- /dev/null
+++ b/cfg/eks-stig-kubernetes-v1r6/managedservices.yaml
@@ -0,0 +1,268 @@
+---
+controls:
+version: "eks-stig-kubernetes-v1r6"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+ - id: 5.1
+ text: "DISA Category Code I"
+ checks:
+ - id: V-242386
+ text: "The Kubernetes API server must have the insecure port flag disabled | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242388
+ text: "The Kubernetes API server must have the insecure bind address not set | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242436
+ text: "The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (Manual)"
+ type: "manual"
+ remediation: |
+ Amazon EKS version 1.18 and later automatically enables ValidatingAdmissionWebhook.
+ Ref: https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html
+ scored: false
+
+ - id: V-245542
+ text: "Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: 5.2
+ text: "DISA Category Code II"
+ checks:
+ - id: V-242376
+ text: "The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242377
+ text: "The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242378
+ text: "The Kubernetes API Server must use TLS 1.2, at a minimum | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242379
+ text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242380
+ text: "The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242382
+ text: "The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242384
+ text: "The Kubernetes Scheduler must have secure binding | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242385
+ text: "The Kubernetes Controller Manager must have secure binding | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242389
+ text: "The Kubernetes API server must have the secure port set | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242401
+ text: "The Kubernetes API Server must have an audit policy set | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242402
+ text: "The Kubernetes API Server must have an audit log path set | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242403
+ text: "Kubernetes API Server must generate audit records | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242405
+ text: "The Kubernetes manifests must be owned by root | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242408
+ text: "The Kubernetes manifests must have least privileges | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242409
+ text: "Kubernetes Controller Manager must disable profiling | Component of EKS Control Plane"
+ type: "skip"
+
+ - id: V-242410
+ text: "The Kubernetes API Server must enforce PPS that adhere to PPSM CAL |
Component of EKS Control Plane" + type: "skip" + + - id: V-242411 + text: "The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242412 + text: "The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242413 + text: "The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane" + type: "skip" + + - id: V-242418 + text: "The Kubernetes API server must use approved cipher suites | Component of EKS Control Plane" + type: "skip" + + - id: V-242419 + text: "Kubernetes API Server must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242420 + text: "Kubernetes Kubelet must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242421 + text: "Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242422 + text: "Kubernetes API Server must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242423 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242424 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242425 + text: "Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242426 + text: "Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane" + type: "skip" + + - id: V-242427 + text: "Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242428 + text: "Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242429 + text: "Kubernetes etcd must have the SSL Certificate Authority set | Component of EKS Control Plane" + type: "skip" + + - id: V-242430 + text: "Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242431 + text: "Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242432 + text: "Kubernetes etcd must have peer-cert-file set for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242433 + text: "Kubernetes etcd must have a peer-key-file set for secure communication | Component of EKS Control Plane" + type: "skip" + + - id: V-242438 + text: "Kubernetes API Server must configure timeouts to limit attack surface | Component of EKS Control Plane" + type: "skip" + + - id: V-242444 + text: "The Kubernetes component manifests must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242445 + text: "The Kubernetes component etcd must be owned by etcd | Component of EKS Control Plane" + type: "skip" + + - id: V-242446 + text: "The Kubernetes conf files must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242447 + text: "The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242448 + text: "The Kubernetes Kube Proxy must be owned by root | Component of EKS 
Control Plane" + type: "skip" + + - id: V-242449 + text: "The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242450 + text: "The Kubernetes Kubelet certificate authority must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242451 + text: "The Kubernetes component PKI must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242452 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242453 + text: "The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242454 + text: "The Kubernetes kubeadm.conf must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242455 + text: "The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242456 + text: "The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242457 + text: "The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane" + type: "skip" + + - id: V-242458 + text: "The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242459 + text: "The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242460 + text: "The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242466 + text: "The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242467 + text: "The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of EKS Control Plane" + type: "skip" + + - id: V-242468 + text: "The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane" + type: "skip" + + - id: V-245541 + text: "Kubernetes Kubelet must not disable timeouts | Component of EKS Control Plane" + type: "skip" + + - id: V-245543 + text: "Kubernetes API Server must disable token authentication to protect information in transit | Component of EKS Control Plane" + type: "skip" + + - id: V-245544 + text: "Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of EKS Control Plane" + type: "skip" diff --git a/cfg/eks-stig-kubernetes-v1r6/master.yaml b/cfg/eks-stig-kubernetes-v1r6/master.yaml new file mode 100644 index 000000000..8152a1abc --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/eks-stig-kubernetes-v1r6/node.yaml b/cfg/eks-stig-kubernetes-v1r6/node.yaml new file mode 100644 index 000000000..960912411 --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/node.yaml @@ -0,0 +1,287 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "DISA Category Code I" + checks: + - 
id: V-242387 # CIS 3.2.4
+ text: "The Kubernetes Kubelet must have the read-only port flag disabled (Manual)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--read-only-port"
+ path: '{.readOnlyPort}'
+ set: true
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set readOnlyPort to 0.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --read-only-port=0
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: false
+ - id: V-242391 # CIS 3.2.1
+ text: "The Kubernetes Kubelet must have anonymous authentication disabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ set: true
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to
+ false.
+ If using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --anonymous-auth=false
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242392 # CIS 3.2.2
+ text: "The Kubernetes kubelet must enable explicit authorization (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ set: true
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set authorization: mode to Webhook. If
+ using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --authorization-mode=Webhook
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242397
+ text: "The Kubernetes kubelet static PodPath must not enable static pods (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - path: '{.staticPodPath}'
+ set: false
+ remediation: |
+ Edit $kubeletconf on each node to remove the staticPodPath.
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242415
+ text: "Secrets in Kubernetes must not be stored as environment variables. (Manual)"
+ type: "manual"
+ remediation: |
+ Run the following command:
+ kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
+ If any of the values returned reference environment variables,
+ rewrite application code to read secrets from mounted secret files, rather than
+ from environment variables.
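+ # A minimal sketch (not STIG text; names are placeholders) of mounting a secret as a
+ # file instead of exposing it through an environment variable:
+ #   spec:
+ #     containers:
+ #       - name: app
+ #         volumeMounts:
+ #           - name: creds
+ #             mountPath: /etc/creds
+ #             readOnly: true
+ #     volumes:
+ #       - name: creds
+ #         secret:
+ #           secretName: app-creds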
+ scored: false
+ - id: V-242434 # CIS 3.2.6
+ text: "Kubernetes Kubelet must enable kernel protection (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --protect-kernel-defaults
+ path: '{.protectKernelDefaults}'
+ set: true
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set protectKernelDefaults: true.
+ If using command line arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+ --protect-kernel-defaults=true
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242435
+ text: "Kubernetes must prevent non-privileged users from executing privileged functions (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ set: true
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ If using a Kubelet config file, edit $kubeletconf to set authorization: mode to Webhook. If
+ using executable arguments, edit the kubelet service file
+ $kubeletsvc on each worker node and
+ set the below parameter in KUBELET_AUTHZ_ARGS variable.
+ --authorization-mode=Webhook
+ Based on your system, restart the kubelet service. For example:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ scored: true
+ - id: V-242393
+ text: "Kubernetes Worker Nodes must not have sshd service running. (Automated)"
+ audit: '/bin/sh -c ''systemctl show -p ActiveState sshd'' '
+ tests:
+ test_items:
+ - flag: ActiveState
+ compare:
+ op: eq
+ value: inactive
+ remediation: |
+ To stop the sshd service, run the command: systemctl stop sshd
+ scored: true
+ - id: V-242394
+ text: "Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)"
+ audit: "/bin/sh -c 'systemctl is-enabled sshd.service'"
+ tests:
+ test_items:
+ - flag: "disabled"
+ remediation: |
+ To disable the sshd service, run the command:
+ chkconfig sshd off
+ scored: true
+ - id: V-242395
+ text: "Kubernetes dashboard must not be enabled. (Manual)"
+ type: "manual"
+ remediation: |
+ Run the command: kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard
+ If any resources are returned, this is a finding.
+ Fix Text: Delete the Kubernetes dashboard deployment with the following command:
+ kubectl delete deployment kubernetes-dashboard --namespace=kube-system
+ scored: false
+ - id: V-242398
+ text: "Kubernetes DynamicAuditing must not be enabled. (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--feature-gates"
+ compare:
+ op: nothave
+ value: "DynamicAuditing=true"
+ set: true
+ - flag: "--feature-gates"
+ set: false
+ remediation: |
+ Edit any manifest files or kubelet config files that contain the feature-gates
+ setting with DynamicAuditing set to "true".
+ Set the flag to "false" or remove the "DynamicAuditing" setting
+ completely. Restart the kubelet service if the kubelet config file
+ is changed.
+ scored: true
+ - id: V-242399
+ text: "Kubernetes DynamicKubeletConfig must not be enabled.
(Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "DynamicKubeletConfig=true" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit any manifest files or $kubeletconf that contain the feature-gates + setting with DynamicKubeletConfig set to "true". + Set the flag to "false" or remove the "DynamicKubeletConfig" setting + completely. Restart the kubelet service if the kubelet config file + if the kubelet config file is changed. + scored: true + - id: V-242404 # CIS 3.2.8 + text: "Kubernetes Kubelet must deny hostname override (Automated)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletbin + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + - id: V-242406 + text: "The Kubernetes kubelet configuration file must be owned by root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + - id: V-242407 + text: "The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + - id: V-242414 + text: "The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)" + type: "manual" + remediation: | + For any of the pods that are using ports below 1024, + reconfigure the pod to use a service to map a host non-privileged + port to the pod port or reconfigure the image to use non-privileged ports. + scored: false + - id: V-242442 + text: "Kubernetes must remove old components after updated versions have been installed. (Manual)" + type: "manual" + remediation: | + To view all pods and the images used to create the pods, from the Master node, run the following command: + kubectl get pods --all-namespaces -o jsonpath="{..image}" | \ + tr -s '[[:space:]]' '\n' | \ + sort | \ + uniq -c + Review the images used for pods running within Kubernetes. + Remove any old pods that are using older images. + scored: false + - id: V-242396 + text: "Kubernetes Kubectl cp command must give expected access and results. (Manual)" + type: "manual" + remediation: | + If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding. + Upgrade the Master and Worker nodes to the latest version of kubectl. 
+ scored: false diff --git a/cfg/eks-stig-kubernetes-v1r6/policies.yaml b/cfg/eks-stig-kubernetes-v1r6/policies.yaml new file mode 100644 index 000000000..e91eacc4f --- /dev/null +++ b/cfg/eks-stig-kubernetes-v1r6/policies.yaml @@ -0,0 +1,33 @@ +--- +controls: +version: "eks-stig-kubernetes-v1r6" +id: 4 +text: "Policies" +type: "policies" +groups: + - id: 4.1 + text: "Policies - DISA Category Code I" + checks: + - id: V-242381 + text: "The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: V-242383 + text: "User-managed resources must be created in dedicated namespaces. (Manual)" + type: "manual" + remediation: | + Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces. + scored: false + + - id: V-242417 + text: "Kubernetes must separate user functionality. (Manual)" + type: "manual" + remediation: | + Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces. + scored: false diff --git a/cfg/gke-1.0/policies.yaml b/cfg/gke-1.0/policies.yaml index 54ce12bd9..0278ec0b5 100644 --- a/cfg/gke-1.0/policies.yaml +++ b/cfg/gke-1.0/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 5.1.4 text: "Minimize access to create pods (Not Scored)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false diff --git a/cfg/gke-1.2.0/policies.yaml b/cfg/gke-1.2.0/policies.yaml index e1261c0e3..6ecb1ff70 100644 --- a/cfg/gke-1.2.0/policies.yaml +++ b/cfg/gke-1.2.0/policies.yaml @@ -37,7 +37,7 @@ groups: - id: 4.1.4 text: "Minimize access to create pods (Manual)" type: "manual" - Remediation: | + remediation: | Where possible, remove create access to pod objects in the cluster. scored: false diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml new file mode 100644 index 000000000..e15e43f48 --- /dev/null +++ b/cfg/gke-1.6.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +node: + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + kubelet: + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml new file mode 100644 index 000000000..011d86075 --- /dev/null +++ b/cfg/gke-1.6.0/controlplane.yaml @@ -0,0 +1,20 @@ +--- +controls: +version: "gke-1.6.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 5.8.1. 
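+ # Illustrative only (assumption, not benchmark text): whether a client certificate
+ # was issued for the cluster can be inspected with something like
+ #   gcloud container clusters describe <cluster-name> --zone <compute-zone> \
+ #     --format='value(masterAuth.clientCertificate)'
+ # An empty value suggests client certificate authentication is not in use.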
+ scored: false
diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml
new file mode 100644
index 000000000..e82320d67
--- /dev/null
+++ b/cfg/gke-1.6.0/managedservices.yaml
@@ -0,0 +1,617 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 5
+text: "Managed Services"
+type: "managedservices"
+groups:
+ - id: 5.1
+ text: "Image Registry and Image Scanning"
+ checks:
+ - id: 5.1.1
+ text: "Ensure Image Vulnerability Scanning is enabled (Automated)"
+ type: "manual"
+ remediation: |
+ For Images Hosted in GCR:
+ Using Command Line:
+
+ gcloud services enable containeranalysis.googleapis.com
+
+ For Images Hosted in AR:
+ Using Command Line:
+
+ gcloud services enable containerscanning.googleapis.com
+ scored: false
+
+ - id: 5.1.2
+ text: "Minimize user access to Container Image repositories (Manual)"
+ type: "manual"
+ remediation: |
+ For Images Hosted in AR:
+ Using Command Line:
+
+ gcloud artifacts repositories set-iam-policy <repository-name> <path-to-policy-file> \
+ --location <repository-location>
+
+ To learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant
+
+ For Images Hosted in GCR:
+ Using Command Line:
+ To change roles at the GCR bucket level:
+ Firstly, run the following if read permissions are required:
+
+ gsutil iam ch <type>:<email-address>:objectViewer gs://artifacts.<project-id>.appspot.com
+
+ Then remove the excessively privileged role (Storage Admin / Storage Object
+ Admin / Storage Object Creator) using:
+
+ gsutil iam ch -d <type>:<email-address>:<role> gs://artifacts.<project-id>.appspot.com
+
+ where:
+ <type> can be one of the following:
+ user, if the <email-address> is a Google account.
+ serviceAccount, if <email-address> specifies a Service account.
+ <email-address> can be one of the following:
+ a Google account (for example, someone@example.com).
+ a Cloud IAM service account.
+
+ To modify roles defined at the project level and subsequently inherited within the GCR
+ bucket, or the Service Account User role, extract the IAM policy file, modify it
+ accordingly and apply it using:
+
+ gcloud projects set-iam-policy <project-id> <policy-file>
+ scored: false
+
+ - id: 5.1.3
+ text: "Minimize cluster access to read-only for Container Image repositories (Manual)"
+ type: "manual"
+ remediation: |
+ For Images Hosted in AR:
+ Using Command Line:
+ Add artifactregistry.reader role
+
+ gcloud artifacts repositories add-iam-policy-binding <repository> \
+ --location=<repository-location> \
+ --member='serviceAccount:<email-address>' \
+ --role='roles/artifactregistry.reader'
+
+ Remove any roles other than artifactregistry.reader
+
+ gcloud artifacts repositories remove-iam-policy-binding <repository> \
+ --location <repository-location> \
+ --member='serviceAccount:<email-address>' \
+ --role='<role-name>'
+
+ For Images Hosted in GCR:
+ For an account explicitly granted to the bucket:
+ Firstly add read access to the Kubernetes Service Account:
+
+ gsutil iam ch <type>:<email-address>:objectViewer gs://artifacts.<project-id>.appspot.com
+
+ where:
+ <type> can be one of the following:
+ user, if the <email-address> is a Google account.
+ serviceAccount, if <email-address> specifies a Service account.
+ <email-address> can be one of the following:
+ a Google account (for example, someone@example.com).
+ a Cloud IAM service account.
+
+ Then remove the excessively privileged role (Storage Admin / Storage Object
+ Admin / Storage Object Creator) using:
+
+ gsutil iam ch -d <type>:<email-address>:<role> gs://artifacts.<project-id>.appspot.com
+
+ For an account that inherits access to the GCR Bucket through Project level
+ permissions, modify the Projects IAM policy file accordingly, then upload it using:
+
+ gcloud projects set-iam-policy <project-id> <policy-file>
+ scored: false
+
+ - id: 5.1.4
+ text: "Ensure only trusted container images are used (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Update the cluster to enable Binary Authorization:
+
+ gcloud container clusters update <cluster-name> --enable-binauthz
+
+ Create a Binary Authorization Policy using the Binary Authorization Policy Reference:
+ https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance.
+
+ Import the policy file into Binary Authorization:
+
+ gcloud container binauthz policy import <policy-file>
+ scored: false
+
+ - id: 5.2
+ text: "Identity and Access Management (IAM)"
+ checks:
+ - id: 5.2.1
+ text: "Ensure GKE clusters are not running using the Compute Engine default service account (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To create a minimally privileged service account:
+
+ gcloud iam service-accounts create <sa-name> \
+ --display-name "GKE Node Service Account"
+ export NODE_SA_EMAIL=$(gcloud iam service-accounts list \
+ --format='value(email)' --filter='displayName:GKE Node Service Account')
+
+ Grant the following roles to the service account:
+
+ export PROJECT_ID=$(gcloud config get-value project)
+ gcloud projects add-iam-policy-binding $PROJECT_ID --member \
+ serviceAccount:$NODE_SA_EMAIL --role roles/monitoring.metricWriter
+ gcloud projects add-iam-policy-binding $PROJECT_ID --member \
+ serviceAccount:$NODE_SA_EMAIL --role roles/monitoring.viewer
+ gcloud projects add-iam-policy-binding $PROJECT_ID --member \
+ serviceAccount:$NODE_SA_EMAIL --role roles/logging.logWriter
+
+ To create a new Node pool using the Service account, run the following command:
+
+ gcloud container node-pools create <node-pool-name> \
+ --service-account=<sa-name>@<project-id>.iam.gserviceaccount.com \
+ --cluster=<cluster-name> --zone <compute-zone>
+
+ Note: The workloads will need to be migrated to the new Node pool, and the old node
+ pools that use the default service account should be deleted to complete the
+ remediation.
+ scored: false
+
+ - id: 5.2.2
+ text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --workload-pool <project-id>.svc.id.goog
+
+ Note that existing Node pools are unaffected. New Node pools default to
+ --workload-metadata-from-node=GKE_METADATA_SERVER.
+
+ Then, modify existing Node pools to enable GKE_METADATA_SERVER:
+
+ gcloud container node-pools update <node-pool-name> --cluster <cluster-name> \
+ --zone <compute-zone> --workload-metadata=GKE_METADATA
+
+ Workloads may need to be modified in order for them to use Workload Identity as
+ described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
+ Also consider the effects on the availability of hosted workloads as Node pools
+ are updated. It may be more appropriate to create new Node Pools.
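+ # Illustrative only (assumption): to confirm Workload Identity is enabled on an
+ # existing cluster, a describe call such as the following can be used:
+ #   gcloud container clusters describe <cluster-name> --zone <compute-zone> \
+ #     --format='value(workloadIdentityConfig.workloadPool)'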
+ scored: false
+
+ - id: 5.3
+ text: "Cloud Key Management Service (Cloud KMS)"
+ checks:
+ - id: 5.3.1
+ text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)"
+ type: "manual"
+ remediation: |
+ To create a key:
+ Create a key ring:
+
+ gcloud kms keyrings create <ring-name> --location <location> --project \
+ <key-project-id>
+
+ Create a key:
+
+ gcloud kms keys create <key-name> --location <location> --keyring <ring-name> \
+ --purpose encryption --project <key-project-id>
+
+ Grant the Kubernetes Engine Service Agent service account the Cloud KMS
+ CryptoKey Encrypter/Decrypter role:
+
+ gcloud kms keys add-iam-policy-binding <key-name> --location <location> \
+ --keyring <ring-name> --member serviceAccount:<service-account-name> \
+ --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project <key-project-id>
+
+ To create a new cluster with Application-layer Secrets Encryption:
+
+ gcloud container clusters create <cluster-name> --cluster-version=latest \
+ --zone <zone> \
+ --database-encryption-key projects/<key-project-id>/locations/<location>/keyRings/<ring-name>/cryptoKeys/<key-name> \
+ --project <cluster-project-id>
+
+ To enable on an existing cluster:
+
+ gcloud container clusters update <cluster-name> --zone <zone> \
+ --database-encryption-key projects/<key-project-id>/locations/<location>/keyRings/<ring-name>/cryptoKeys/<key-name> \
+ --project <cluster-project-id>
+ scored: false
+
+ - id: 5.4
+ text: "Node Metadata"
+ checks:
+ - id: 5.4.1
+ text: "Ensure the GKE Metadata Server is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+
+ gcloud container clusters update <cluster-name> --identity-namespace=<project-id>.svc.id.goog
+
+ Note that existing Node pools are unaffected. New Node pools default to
+ --workload-metadata-from-node=GKE_METADATA_SERVER.
+
+ To modify an existing Node pool to enable GKE Metadata Server:
+
+ gcloud container node-pools update <node-pool-name> --cluster=<cluster-name> \
+ --workload-metadata-from-node=GKE_METADATA_SERVER
+
+ Workloads may need modification in order for them to use Workload Identity as
+ described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity.
+ scored: false
+
+ - id: 5.5
+ text: "Node Configuration and Maintenance"
+ checks:
+ - id: 5.5.1
+ text: "Ensure Container-Optimized OS (cos_containerd) is used for GKE node images (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To set the node image to cos for an existing cluster's Node pool:
+
+ gcloud container clusters upgrade <cluster-name> --image-type cos_containerd \
+ --zone <compute-zone> --node-pool <node-pool-name>
+ scored: false
+
+ - id: 5.5.2
+ text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To enable node auto-repair for an existing cluster's Node pool:
+
+ gcloud container node-pools update <node-pool-name> --cluster <cluster-name> \
+ --zone <compute-zone> --enable-autorepair
+ scored: false
+
+ - id: 5.5.3
+ text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To enable node auto-upgrade for an existing cluster's Node pool, run the following
+ command:
+
+ gcloud container node-pools update <node-pool-name> --cluster <cluster-name> \
+ --zone <cluster-zone> --enable-autoupgrade
+ scored: false
+
+ - id: 5.5.4
+ text: "When creating New Clusters - Automate GKE version management using Release Channels (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Create a new cluster by running the following command:
+
+ gcloud container clusters create <cluster-name> --zone <cluster-zone> \
+ --release-channel <release-channel>
+
+ where <release-channel> is stable or regular, according to requirements.
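+ # For reference, GKE release channels include rapid, regular and stable. Illustrative
+ # only (assumption): the channel of an existing cluster can be checked with
+ #   gcloud container clusters describe <cluster-name> --zone <cluster-zone> \
+ #     --format='value(releaseChannel.channel)'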
+ scored: false
+
+ - id: 5.5.5
+ text: "Ensure Shielded GKE Nodes are Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To migrate an existing cluster, the flag --enable-shielded-nodes needs to be
+ specified in the cluster update command:
+
+ gcloud container clusters update <cluster-name> --zone <cluster-zone> \
+ --enable-shielded-nodes
+ scored: false
+
+ - id: 5.5.6
+ text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To create a Node pool within the cluster with Integrity Monitoring enabled, run the
+ following command:
+
+ gcloud container node-pools create <node-pool-name> --cluster <cluster-name> \
+ --zone <compute-zone> --shielded-integrity-monitoring
+
+ Workloads from existing non-conforming Node pools will need to be migrated to the
+ newly created Node pool, then delete non-conforming Node pools to complete the
+ remediation.
+ scored: false
+
+ - id: 5.5.7
+ text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To create a Node pool within the cluster with Secure Boot enabled, run the following
+ command:
+
+ gcloud container node-pools create <node-pool-name> --cluster <cluster-name> \
+ --zone <compute-zone> --shielded-secure-boot
+
+ Workloads will need to be migrated from existing non-conforming Node pools to the
+ newly created Node pool, then delete the non-conforming pools.
+ scored: false
+
+ - id: 5.6
+ text: "Cluster Networking"
+ checks:
+ - id: 5.6.1
+ text: "Enable VPC Flow Logs and Intranode Visibility (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ 1. Find the subnetwork name associated with the cluster.
+
+ gcloud container clusters describe <cluster-name> \
+ --region <cluster-region> --format json | jq '.subnetwork'
+
+ 2. Update the subnetwork to enable VPC Flow Logs.
+ gcloud compute networks subnets update <subnet-name> --enable-flow-logs
+ scored: false
+
+ - id: 5.6.2
+ text: "Ensure use of VPC-native clusters (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To enable Alias IP on a new cluster, run the following command:
+
+ gcloud container clusters create <cluster-name> --zone <compute-zone> \
+ --enable-ip-alias
+
+ If using Autopilot configuration mode:
+
+ gcloud container clusters create-auto <cluster-name> \
+ --zone <compute-zone>
+ scored: false
+
+ - id: 5.6.3
+ text: "Ensure Control Plane Authorized Networks is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To enable Control Plane Authorized Networks for an existing cluster, run the following
+ command:
+
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --enable-master-authorized-networks
+
+ Along with this, you can list authorized networks using the --master-authorized-networks
+ flag which contains a list of up to 20 external networks that are allowed to
+ connect to your cluster's control plane through HTTPS. You provide these networks as
+ a comma-separated list of addresses in CIDR notation (such as 90.90.100.0/24).
+ scored: false
+
+ - id: 5.6.4
+ text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Create a cluster with a Private Endpoint enabled and Public Access disabled by including
+ the --enable-private-endpoint flag within the cluster create command:
+
+ gcloud container clusters create <cluster-name> --enable-private-endpoint
+
+ Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias
+ and --master-ipv4-cidr=<master-cidr-range>.
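+ # Putting the flags above together, a private-endpoint create command might look like
+ # the following sketch (the CIDR is an example value, not a requirement):
+ #   gcloud container clusters create <cluster-name> --enable-private-endpoint \
+ #     --enable-private-nodes --enable-ip-alias --master-ipv4-cidr=172.16.0.32/28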
+ scored: false
+
+ - id: 5.6.5
+ text: "Ensure clusters are created with Private Nodes (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To create a cluster with Private Nodes enabled, include the --enable-private-nodes
+ flag within the cluster create command:
+
+ gcloud container clusters create <cluster-name> --enable-private-nodes
+
+ Setting this flag also requires the setting of --enable-ip-alias and
+ --master-ipv4-cidr=<master-cidr-range>.
+ scored: false
+
+ - id: 5.6.6
+ text: "Consider firewalling GKE worker nodes (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Use the following command to generate firewall rules, setting the variables as
+ appropriate:
+
+ gcloud compute firewall-rules create <firewall-rule-name> \
+ --network <network> --priority <priority> --direction <direction> \
+ --action <action> --target-tags <tag> \
+ --target-service-accounts <service-account> \
+ --source-ranges <source-cidr-range> --source-tags <source-tags> \
+ --source-service-accounts <source-service-account> \
+ --destination-ranges <destination-cidr-range> --rules <rules>
+ scored: false
+
+ - id: 5.6.7
+ text: "Ensure use of Google-managed SSL Certificates (Automated)"
+ type: "manual"
+ remediation: |
+ If services of type:LoadBalancer are discovered, consider replacing the Service with
+ an Ingress.
+
+ To configure the Ingress and use Google-managed SSL certificates, follow the
+ instructions as listed at: https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs.
+ scored: false
+
+ - id: 5.7
+ text: "Logging"
+ checks:
+ - id: 5.7.1
+ text: "Ensure Logging and Cloud Monitoring is Enabled (Automated)"
+ type: "manual"
+ remediation: |
+ To enable Logging for an existing cluster, run the following command:
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --logging=<components-to-be-logged>
+
+ See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging
+ for a list of available components for logging.
+
+ To enable Cloud Monitoring for an existing cluster, run the following command:
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --monitoring=<components-to-be-monitored>
+
+ See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--monitoring
+ for a list of available components for Cloud Monitoring.
+ scored: false
+
+ - id: 5.7.2
+ text: "Enable Linux auditd logging (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Download the example manifests:
+ curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml
+
+ Edit the example manifests if needed. Then, deploy them:
+ kubectl apply -f cos-auditd-logging.yaml
+
+ Verify that the logging Pods have started. If a different Namespace was defined in the
+ manifests, replace cos-auditd with the name of the namespace being used:
+ kubectl get pods --namespace=cos-auditd
+ scored: false
+
+ - id: 5.8
+ text: "Authentication and Authorization"
+ checks:
+ - id: 5.8.1
+ text: "Ensure authentication using Client Certificates is Disabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Create a new cluster without a Client Certificate:
+ gcloud container clusters create [CLUSTER_NAME] \
+ --no-issue-client-certificate
+ scored: false
+
+ - id: 5.8.2
+ text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Follow the G Suite Groups instructions at: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke.
+
+ Then, create a cluster with:
+ gcloud container clusters create <cluster-name> --security-group <security-group>
+
+ Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that
+ reference the G Suite Groups.
+ scored: false
+
+ - id: 5.8.3
+ text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To disable Legacy Authorization for an existing cluster, run the following command:
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --no-enable-legacy-authorization
+ scored: false
+
+ - id: 5.9
+ text: "Storage"
+ checks:
+ - id: 5.9.1
+ text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Follow the instructions detailed at: https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek.
+ scored: false
+
+ - id: 5.9.2
+ text: "Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Create a new node pool using customer-managed encryption keys for the node boot
+ disk, of either pd-standard or pd-ssd:
+ gcloud container node-pools create <node-pool-name> --disk-type <disk-type> \
+ --boot-disk-kms-key projects/<key-project-id>/locations/<location>/keyRings/<ring-name>/cryptoKeys/<key-name>
+
+ Create a cluster using customer-managed encryption keys for the node boot disk, of
+ either pd-standard or pd-ssd:
+ gcloud container clusters create <cluster-name> --disk-type <disk-type> \
+ --boot-disk-kms-key projects/<key-project-id>/locations/<location>/keyRings/<ring-name>/cryptoKeys/<key-name>
+ scored: false
+
+ - id: 5.10
+ text: "Other Cluster Configurations"
+ checks:
+ - id: 5.10.1
+ text: "Ensure Kubernetes Web UI is Disabled (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To disable the Kubernetes Dashboard on an existing cluster, run the following
+ command:
+ gcloud container clusters update <cluster-name> --zone <zone> \
+ --update-addons=KubernetesDashboard=DISABLED
+ scored: false
+
+ - id: 5.10.2
+ text: "Ensure that Alpha clusters are not used for production workloads (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Upon creating a new cluster:
+ gcloud container clusters create [CLUSTER_NAME] \
+ --zone [COMPUTE_ZONE]
+
+ Do not use the --enable-kubernetes-alpha argument.
+ scored: false
+
+ - id: 5.10.3
+ text: "Consider GKE Sandbox for running untrusted workloads (Manual)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ To enable GKE Sandbox on an existing cluster, a new Node pool must be created,
+ which can be done using:
+ gcloud container node-pools create <node-pool-name> --zone <compute-zone> \
+ --cluster <cluster-name> --image-type=cos_containerd --sandbox="type=gvisor"
+ scored: false
+
+ - id: 5.10.4
+ text: "Ensure use of Binary Authorization (Automated)"
+ type: "manual"
+ remediation: |
+ Using Command Line:
+ Update the cluster to enable Binary Authorization:
+ gcloud container clusters update <cluster-name> --zone <compute-zone> \
+ --binauthz-evaluation-mode=<evaluation-mode>
+
+ Example:
+ gcloud container clusters update $CLUSTER_NAME --zone $COMPUTE_ZONE \
+ --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE
+
+ See: https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode
+ for more details around the evaluation modes available.
+
+ Create a Binary Authorization Policy using the Binary Authorization Policy Reference:
+ https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance.
+
+ Import the policy file into Binary Authorization:
+ gcloud container binauthz policy import <policy-file>
+ scored: false
+
+ - id: 5.10.5
+ text: "Enable Security Posture (Manual)"
+ type: "manual"
+ remediation: |
+ Enable security posture via the UI, gcloud or the API.
+ https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration
+ scored: false
diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml
new file mode 100644
index 000000000..9686bf2f8
--- /dev/null
+++ b/cfg/gke-1.6.0/master.yaml
@@ -0,0 +1,6 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 1
+text: "Control Plane Components"
+type: "master"
diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml
new file mode 100644
index 000000000..bac80720e
--- /dev/null
+++ b/cfg/gke-1.6.0/node.yaml
@@ -0,0 +1,506 @@
+---
+controls:
+version: "gke-1.6.0"
+id: 3
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+ - id: 3.1
+ text: "Worker Node Configuration Files"
+ checks:
+ - id: 3.1.1
+ text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Manual)"
+ audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "644"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+
+ chmod 644 $proxykubeconfig
+ scored: true
+
+ - id: 3.1.2
+ text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)"
+ audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example:
+
+ chown root:root $proxykubeconfig
+ scored: true
+
+ - id: 3.1.3
+ text: "Ensure that the kubelet configuration file has permissions set to 600 (Manual)"
+ audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c permissions=%a /home/kubernetes/kubelet-config.yaml; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Run the following command (using the kubelet config file location)
+
+ chmod 600 /home/kubernetes/kubelet-config.yaml
+ scored: true
+
+ - id: 3.1.4
+ text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)"
+ audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c %U:%G /home/kubernetes/kubelet-config.yaml; fi'' '
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the following command (using the config file location identified in the Audit step)
+
+ chown root:root /home/kubernetes/kubelet-config.yaml
+ scored: true
+
+ - id: 3.2
+ text: "Kubelet"
+ checks:
+ - id: 3.2.1
+ text: "Ensure that the Anonymous Auth is Not Enabled (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml"
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ compare:
+ op: eq
+ value: false
+ remediation: |
+ Remediation Method 1:
+ If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet
+ process:
+
+ ps -ef | grep kubelet
+
+ The output of the above command provides details of the active kubelet process, from
+ which we can see the location of the configuration file provided to the kubelet service
+ with the --config argument. The file can be viewed with a command such as more or
+ less, like so:
+
+ sudo less /home/kubernetes/kubelet-config.yaml
+
+ Disable Anonymous Authentication by setting the following parameter:
+
+ "authentication": { "anonymous": { "enabled": false } }
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file on each worker node and
+ ensure the below parameters are part of the KUBELET_ARGS variable string.
+
+ For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
+ Bottlerocket AMIs, then this file can be found at
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise,
+ you may need to look up documentation for your chosen operating system to determine
+ which service manager is configured:
+
+ --anonymous-auth=false
+
+ For Both Remediation Steps:
+ Based on your system, restart the kubelet service and check the service status.
+ The following example is for operating systems using systemd, such as the Amazon
+ EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl
+ command. If systemctl is not available then you will need to look up documentation for
+ your chosen operating system to determine which service manager is configured:
+ systemctl daemon-reload
+ systemctl restart kubelet.service
+ systemctl status kubelet -l
+ scored: true
+
+ - id: 3.2.2
+ text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+ audit: "/bin/ps -fC $kubeletbin"
+ audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml"
+ tests:
+ test_items:
+ - flag: --authorization-mode
+ path: '{.authorization.mode}'
+ compare:
+ op: nothave
+ value: AlwaysAllow
+ remediation: |
+ Remediation Method 1:
+ If configuring via the Kubelet config file, you first need to locate the file.
+ To do this, SSH to each node and execute the following command to find the kubelet
+ process:
+
+ ps -ef | grep kubelet
+
+ The output of the above command provides details of the active kubelet process, from
+ which we can see the location of the configuration file provided to the kubelet service
+ with the --config argument. The file can be viewed with a command such as more or
+ less, like so:
+
+ sudo less /path/to/kubelet-config.json
+
+ Enable Webhook Authentication by setting the following parameter:
+
+ "authentication": { "webhook": { "enabled": true } }
+
+ Next, set the Authorization Mode to Webhook by setting the following parameter:
+
+ "authorization": { "mode": "Webhook" }
+
+ Finer detail of the authentication and authorization fields can be found in the
+ Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/).
+
+ Remediation Method 2:
+ If using executable arguments, edit the kubelet service file on each worker node and
+ ensure the below parameters are part of the KUBELET_ARGS variable string.
+ For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or
+ Bottlerocket AMIs, then this file can be found at
+ /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf.
Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --authentication-token-webhook + --authorization-mode=Webhook + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.3 + text: "Ensure that a Client CA File is Configured (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Configure the client certificate authority file by setting the following parameter + appropriately: + + "authentication": { "x509": {"clientCAFile": } } + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --client-ca-file= + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command.
If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + bin_op: or + remediation: | + If modifying the Kubelet config file, edit the kubelet config file + /home/kubernetes/kubelet-config.yaml and set the below parameter to 0 + + "readOnlyPort": 0 + + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --read-only-port=0 + + For each remediation: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /home/kubernetes/kubelet-config.yaml and set the below parameter to a non-zero + value in the format of #h#m#s + + "streamingConnectionIdleTimeout": "4h0m0s" + + You should ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not + specify a --streaming-connection-idle-timeout argument because it would + override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --streaming-connection-idle-timeout=4h0m0s + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "streamingConnectionIdleTimeout": by extracting the live configuration from the + nodes running kubelet.
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /home/kubernetes/kubelet-config.yaml and set the below parameter to + true + + "makeIPTablesUtilChains": true + + Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf + does not set the --make-iptables-util-chains argument because that would + override your Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --make-iptables-util-chains=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "makeIPTablesUtilChains": true by extracting the live configuration from the nodes + running kubelet. + + **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.7 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level.
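+ For example, to leave event capture unlimited, which is the value the audit above expects (0 disables event rate limiting), set: + + eventRecordQPS: 0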
+ + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + + --event-qps=0 + + Based on your system, restart the kubelet service. For example: + + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /home/kubernetes/kubelet-config.yaml and set the below parameter to + true + + "rotateCertificates": true + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates + executable argument to false because this would override the Kubelet + config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-certificates=true + scored: true + + - id: 3.2.9 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /home/kubernetes/kubelet-config.yaml and set the below parameter to true + + "featureGates": { + "RotateKubeletServerCertificate":true + }, + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set + the --rotate-kubelet-server-certificate executable argument to false because + this would override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-kubelet-server-certificate=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "RotateKubeletServerCertificate": by extracting the live configuration from the + nodes running kubelet.
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediation methods: + Restart the kubelet service and check status. The example below is for when using + systemctl to manage services: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml new file mode 100644 index 000000000..333335536 --- /dev/null +++ b/cfg/gke-1.6.0/policies.yaml @@ -0,0 +1,238 @@ +--- +controls: +version: "gke-1.6.0" +id: 4 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Automated)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Ensure that default service accounts are not actively used (Automated)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific + access to the Kubernetes API server. + + Modify the configuration of each default service account to include this value + + automountServiceAccountToken: false + scored: false + + - id: 4.1.5 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.6 + text: "Avoid use of system:masters group (Automated)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.7 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.1.8 + text: "Avoid bindings to system:anonymous (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings and rolebindings to the user system:anonymous.
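+ For example, bindings that reference system:anonymous can be listed with a command such as the following (assumes jq is available): + + kubectl get clusterrolebindings,rolebindings -A -o json | jq -r '.items[] | select(any(.subjects[]?; .name == "system:anonymous")) | .metadata.name'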
+ Check if they are used and review the permissions associated with the binding using the + commands in the Audit section above or refer to GKE documentation + (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing unsafe bindings with an authenticated, user-defined group. + Where possible, bind to non-default, user-defined groups with least-privilege roles. + + If there are any unsafe bindings to the user system:anonymous, proceed to delete them + after consideration for cluster operations with only necessary, safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.9 + text: "Avoid non-default bindings to system:unauthenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:unauthenticated. Check if they are used and review the permissions + associated with the binding using the commands in the Audit section above or refer to + GKE documentation (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing non-default, unsafe bindings with an authenticated, + user-defined group. Where possible, bind to non-default, user-defined groups with + least-privilege roles. + + If there are any non-default, unsafe bindings to the group system:unauthenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.10 + text: "Avoid non-default bindings to system:authenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:authenticated. Check if they are used and review the permissions associated + with the binding using the commands in the Audit section above or refer to GKE + documentation. + + Strongly consider replacing non-default, unsafe bindings with an authenticated, + user-defined group. Where possible, bind to non-default, user-defined groups with + least-privilege roles. + + If there are any non-default, unsafe bindings to the group system:authenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. (Manual)" + type: "manual" + remediation: | + Ensure that Pod Security Admission is in place for every namespace which contains + user workloads. + Run the following command to enforce the Baseline profile in a namespace: + + kubectl label namespace [NAMESPACE] pod-security.kubernetes.io/enforce=baseline + scored: false + + - id: 4.3 + text: "Network Policies and CNI" + checks: + - id: 4.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin + will be updated.
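+ For example, Network Policy can be enabled on an existing cluster with commands such as the below (cluster name and zone are placeholders): + + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] --update-addons=NetworkPolicy=ENABLED + gcloud container clusters update [CLUSTER_NAME] --zone [COMPUTE_ZONE] --enable-network-policy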
See Recommendation 5.6.7. + scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Automated)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as needed. + See: https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy + for more information. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Automated)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: + - id: 4.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + Also see recommendation 5.10.4. + scored: false + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Ensure that the seccomp profile is set to RuntimeDefault in your pod definitions (Automated)" + type: "manual" + remediation: | + Use security context to enable the RuntimeDefault seccomp profile in your pod + definitions. An example of the relevant pod spec fragment is as below: + + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Google + Container-Optimized OS Benchmark. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Automated)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace.
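+ For example (namespace and manifest names are placeholders): + + kubectl create namespace [NAMESPACE] + kubectl -n [NAMESPACE] apply -f [MANIFEST_FILE]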
+ scored: false diff --git a/cfg/k3s-cis-1.23/config.yaml b/cfg/k3s-cis-1.23/config.yaml new file mode 100644 index 000000000..d6deb1ce6 --- /dev/null +++ b/cfg/k3s-cis-1.23/config.yaml @@ -0,0 +1,47 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + scheduler: + bins: + - containerd + + controllermanager: + bins: + - containerd + + etcd: + bins: + - containerd + datadirs: + - /var/lib/rancher/k3s/server/db/etcd + node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + + policies: + components: + - policies diff --git a/cfg/k3s-cis-1.23/controlplane.yaml b/cfg/k3s-cis-1.23/controlplane.yaml new file mode 100644 index 000000000..aeee0a77c --- /dev/null +++ b/cfg/k3s-cis-1.23/controlplane.yaml @@ -0,0 +1,47 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + type: "manual" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas: + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/k3s-cis-1.23/etcd.yaml b/cfg/k3s-cis-1.23/etcd.yaml new file mode 100644 index 000000000..54cfd0816 --- /dev/null +++ b/cfg/k3s-cis-1.23/etcd.yaml @@ -0,0 +1,137 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + tests: + bin_op: and + test_items: + - flag: "cert-file" + set: true + - flag: "key-file" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file $etcdconf + on the master node and set the below parameters.
+ --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + set: false + - flag: "--auto-tls" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + tests: + bin_op: and + test_items: + - flag: "cert-file" + set: true + - flag: "key-file" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "client-cert-auth" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + set: false + - flag: "--peer-auto-tls" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" + tests: + test_items: + - flag: "trusted-ca-file" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter.
+ --trusted-ca-file= + scored: false diff --git a/cfg/k3s-cis-1.23/master.yaml b/cfg/k3s-cis-1.23/master.yaml new file mode 100644 index 000000000..0209dc73e --- /dev/null +++ b/cfg/k3s-cis-1.23/master.yaml @@ -0,0 +1,986 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + type: "skip" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c %a $etcddatadir" + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
+ For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + type: "skip" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + type: "skip" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter.
+ scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file.
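+ For example, a minimal admission control configuration file (file names and limits are illustrative): + + apiVersion: apiserver.config.k8s.io/v1 + kind: AdmissionConfiguration + plugins: + - name: EventRateLimit + path: eventconfig.yaml + + with eventconfig.yaml containing, for example: + + apiVersion: eventratelimit.admission.k8s.io/v1alpha1 + kind: Configuration + limits: + - type: Server + qps: 5000 + burst: 20000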
+ Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
+ scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. 
For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -m1 'Running kube-apiserver'" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. 
+ --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: "grep aescbc /path/to/encryption-config.json" + type: "manual" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" + type: "skip" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml new file mode 100644 index 000000000..9c20b229e --- /dev/null +++ b/cfg/k3s-cis-1.23/node.yaml @@ -0,0 +1,490 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 644 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig ' + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig' + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + - flag: "640" + compare: + op: eq + value: "640" + set: true + - flag: "600" + compare: + op: eq + value: "600" + set: true + - flag: "444" + compare: + op: eq + value: "444" + set: true + - flag: "440" + compare: + op: eq + value: "440" + set: true + - flag: "400" + compare: + op: eq + value: "400" + set: true + - flag: "000" + compare: + op: eq + value: "000" + set: true + bin_op: or + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file: chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. 
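+ For example, an illustrative config file snippet (the path is a placeholder to substitute with your client CA file): + authentication: + x509: + clientCAFile: <path/to/client-ca-file>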
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" + type: "skip" + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + type: "skip" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + set: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. 
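+ For example, an illustrative config file line: + makeIPTablesUtilChains: true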
+ If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "manual" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + type: "skip" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable.
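+ For example, an illustrative config file line: + rotateCertificates: true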
+ Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "skip" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + type: "manual" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/k3s-cis-1.23/policies.yaml b/cfg/k3s-cis-1.23/policies.yaml new file mode 100644 index 000000000..7faffa3ee --- /dev/null +++ b/cfg/k3s-cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "k3s-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
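+ One illustrative way to list the bindings to review: + kubectl get clusterrolebindings -o wide | grep cluster-admin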
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers.
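+ For example, with Pod Security Admission this can be enforced by labelling the namespace (illustrative; the baseline profile disallows hostNetwork): + kubectl label namespace <ns> pod-security.kubernetes.io/enforce=baseline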
+ scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables.
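+ An illustrative Pod spec fragment that mounts a Secret as a file (names are placeholders): + containers: + - name: app + volumeMounts: + - name: app-secret + mountPath: /etc/app/secret + readOnly: true + volumes: + - name: app-secret + secret: + secretName: my-secret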
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
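+ For example, an illustrative way to review what currently runs in the default namespace: + kubectl get all -n default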
+ scored: false diff --git a/cfg/k3s-cis-1.24/config.yaml b/cfg/k3s-cis-1.24/config.yaml new file mode 100644 index 000000000..4adbac3a1 --- /dev/null +++ b/cfg/k3s-cis-1.24/config.yaml @@ -0,0 +1,58 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + scheduler: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + + controllermanager: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + + + etcd: + bins: + - containerd + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.24/controlplane.yaml b/cfg/k3s-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..471017bd7 --- /dev/null +++ b/cfg/k3s-cis-1.24/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). 
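+ An illustrative (not prescriptive) policy fragment that logs access to Secrets at the Metadata level: + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: Metadata + resources: + - group: "" + resources: ["secrets", "configmaps"] + - group: "authentication.k8s.io" + resources: ["tokenreviews"]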
+ scored: false diff --git a/cfg/k3s-cis-1.24/etcd.yaml b/cfg/k3s-cis-1.24/etcd.yaml new file mode 100644 index 000000000..cb57781c0 --- /dev/null +++ b/cfg/k3s-cis-1.24/etcd.yaml @@ -0,0 +1,144 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: and + test_items: + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files. + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.client-transport-security.client-cert-auth}" + compare: + op: eq + value: true + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: or + test_items: + - path: "{.client-transport-security.auto-tls}" + compare: + op: eq + value: false + - path: "{.client-transport-security.auto-tls}" + set: false + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + client-transport-security: + auto-tls: false + scored: false + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: and + test_items: + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files.
+ scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.peer-transport-security.client-cert-auth}" + compare: + op: eq + value: true + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable peer client certificate authentication. + scored: false + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit_config: "cat $etcdconf" + tests: + bin_op: or + test_items: + - path: "{.peer-transport-security.auto-tls}" + compare: + op: eq + value: false + - path: "{.peer-transport-security.auto-tls}" + set: false + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + peer-transport-security: + auto-tls: false + scored: false + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit_config: "cat $etcdconf" + tests: + test_items: + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt" + remediation: | + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates a unique certificate authority for etcd. + This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use a shared certificate authority. + scored: false diff --git a/cfg/k3s-cis-1.24/master.yaml b/cfg/k3s-cis-1.24/master.yaml new file mode 100644 index 000000000..5ff0318c9 --- /dev/null +++ b/cfg/k3s-cis-1.24/master.yaml @@ -0,0 +1,1021 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
+ scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c permissions=%a $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + type: "skip" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + type: "skip" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. 
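+ If a custom CNI that writes files there is in use, their permissions can be reviewed manually, mirroring the audit command above (illustrative): + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a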
+ scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + type: "skip" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + type: "skip" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /var/lib/rancher/k3s/server/tls | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth argument to false. If it is set to true, + edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. 
+ kube-apiserver-arg: + - "anonymous-auth=true" + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + type: "skip" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-client-certificate'" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + By default, K3s automatically provides the kubelet client certificate and key. + They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+ --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. 
This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount'" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + Follow the documentation and create ServiceAccount objects as per your environment. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + - flag: "--secure-port" + set: false + remediation: | + By default, K3s sets the secure port to 6444. 
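+ You can verify the listener locally with, for example, ss -tlnp | grep 6444 (shown for illustration; adjust the port if you have overridden it).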
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "secure-port=" + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "profiling=true" + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path'" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage'" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup'" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize'" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'request-timeout'" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. 
For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file'" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. 
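+ You can inspect it with, for example, openssl x509 -in /var/lib/rancher/k3s/server/tls/client-ca.crt -noout -subject -enddate (shown for illustration only).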
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-apiserver-arg:
+ - "client-ca-file="
+ scored: true
+
+ - id: 1.2.29
+ text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
+ tests:
+ test_items:
+ - flag: "--etcd-cafile"
+ remediation: |
+ By default, K3s automatically provides the etcd certificate authority file.
+ It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-apiserver-arg:
+ - "etcd-cafile="
+ scored: true
+
+ - id: 1.2.30
+ text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+ tests:
+ test_items:
+ - flag: "--encryption-provider-config"
+ remediation: |
+ K3s can be configured to use encryption providers to encrypt secrets at rest.
+ Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+ secrets-encryption: true
+ Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+ If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+ scored: false
+
+ - id: 1.2.31
+ text: "Ensure that encryption providers are appropriately configured (Manual)"
+ audit: |
+ ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+ if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
+ tests:
+ test_items:
+ - flag: "provider"
+ compare:
+ op: valid_elements
+ value: "aescbc,kms,secretbox"
+ remediation: |
+ K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
+ Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+ secrets-encryption: true
+ Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
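+ For example, k3s secrets-encrypt status reports the current encryption status and provider configuration.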
+ If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. + If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg:
+ - "profiling=true"
+ scored: true
+
+ - id: 1.3.3
+ text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+ tests:
+ test_items:
+ - flag: "--use-service-account-credentials"
+ compare:
+ op: noteq
+ value: false
+ remediation: |
+ By default, K3s sets the --use-service-account-credentials argument to true.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-controller-manager-arg:
+ - "use-service-account-credentials=false"
+ scored: true
+
+ - id: 1.3.4
+ text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+ tests:
+ test_items:
+ - flag: "--service-account-private-key-file"
+ remediation: |
+ By default, K3s automatically provides the service account private key file.
+ It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-controller-manager-arg:
+ - "service-account-private-key-file="
+ scored: true
+
+ - id: 1.3.5
+ text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+ tests:
+ test_items:
+ - flag: "--root-ca-file"
+ remediation: |
+ By default, K3s automatically provides the root CA file.
+ It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-controller-manager-arg:
+ - "root-ca-file="
+ scored: true
+
+ - id: 1.3.6
+ text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--feature-gates"
+ compare:
+ op: nothave
+ value: "RotateKubeletServerCertificate=false"
+ set: true
+ - flag: "--feature-gates"
+ set: false
+ remediation: |
+ By default, K3s does not set the RotateKubeletServerCertificate feature gate, which leaves kubelet server certificate rotation enabled.
+ If you have set this feature gate to false, you should remove that setting.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-controller-manager-arg:
+ - "feature-gates=RotateKubeletServerCertificate=false"
+ scored: true
+
+ - id: 1.3.7
+ text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--bind-address"
+ compare:
+ op: eq
+ value: "127.0.0.1"
+ set: true
+ - flag: "--bind-address"
+ set: false
+ remediation: |
+ By default, K3s sets the --bind-address argument to 127.0.0.1.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-controller-manager-arg:
+ - "bind-address="
+ scored: true
+
+ - id: 1.4
+ text: "Scheduler"
+ checks:
+ - id: 1.4.1
+ text: "Ensure that the --profiling argument is set to false (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1"
+ tests:
+ test_items:
+ - flag: "--profiling"
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ By default, K3s sets the --profiling argument to false.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-scheduler-arg:
+ - "profiling=true"
+ scored: true
+
+ - id: 1.4.2
+ text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+ audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "--bind-address"
+ compare:
+ op: eq
+ value: "127.0.0.1"
+ set: true
+ - flag: "--bind-address"
+ set: false
+ remediation: |
+ By default, K3s sets the --bind-address argument to 127.0.0.1.
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-scheduler-arg:
+ - "bind-address="
+ scored: true
diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml
new file mode 100644
index 000000000..a9f1e0386
--- /dev/null
+++ b/cfg/k3s-cis-1.24/node.yaml
@@ -0,0 +1,427 @@
+---
+controls:
+version: "k3s-cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+ - id: 4.1
+ text: "Worker Node Configuration Files"
+ checks:
+ - id: 4.1.1
+ text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Not Applicable.
+ The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+ scored: true
+
+ - id: 4.1.2
+ text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Not Applicable.
+ The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+ scored: true
+
+ - id: 4.1.3
+ text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+ tests:
+ bin_op: or
+ test_items:
+ - flag: "permissions"
+ set: true
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 600 $proxykubeconfig
+ scored: true
+
+ - id: 4.1.4
+ text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+ audit: 'stat -c %U:%G $proxykubeconfig'
+ tests:
+ bin_op: or
+ test_items:
+ - flag: root:root
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example, chown root:root $proxykubeconfig
+ scored: true
+
+ - id: 4.1.5
+ text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chmod 600 $kubeletkubeconfig
+ scored: true
+
+ - id: 4.1.6
+ text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+ audit: 'stat -c %U:%G $kubeletkubeconfig'
+ tests:
+ test_items:
+ - flag: "root:root"
+ compare:
+ op: eq
+ value: "root:root"
+ set: true
+ remediation: |
+ Run the below command (based on the file location on your system) on each worker node.
+ For example,
+ chown root:root $kubeletkubeconfig
+ scored: true
+
+ - id: 4.1.7
+ text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+ audit: "stat -c permissions=%a $kubeletcafile"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ set: true
+ remediation: |
+ Run the following command to modify the file permissions of the
+ --client-ca-file chmod 600 $kubeletcafile
+ scored: true
+
+ - id: 4.1.8
+ text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+ audit: "stat -c %U:%G $kubeletcafile"
+ tests:
+ test_items:
+ - flag: root:root
+ compare:
+ op: eq
+ value: root:root
+ remediation: |
+ Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root $kubeletcafile
+ scored: true
+
+ - id: 4.1.9
+ text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Not Applicable.
+ The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+ scored: true
+
+ - id: 4.1.10
+ text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
+ audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+ type: "skip"
+ tests:
+ test_items:
+ - flag: root:root
+ remediation: |
+ Not Applicable.
+ The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+ scored: true
+
+ - id: 4.2
+ text: "Kubelet"
+ checks:
+ - id: 4.2.1
+ text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+ audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
+ tests:
+ test_items:
+ - flag: "--anonymous-auth"
+ path: '{.authentication.anonymous.enabled}'
+ compare:
+ op: eq
+ value: false
+ set: true
+ remediation: |
+ By default, K3s sets the --anonymous-auth argument to false. If you have set this to a different value, you
+ should set it back to false.
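+ You can verify the running value with, for example, journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth' (the same journal line this check's audit inspects).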
If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + set: true + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. 
For example,
+ systemctl daemon-reload
+ systemctl restart k3s.service
+ scored: true
+
+ - id: 4.2.5
+ text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ test_items:
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ compare:
+ op: noteq
+ value: 0
+ - flag: --streaming-connection-idle-timeout
+ path: '{.streamingConnectionIdleTimeout}'
+ set: false
+ bin_op: or
+ remediation: |
+ If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+ kubelet-arg:
+ - "streaming-connection-idle-timeout=5m"
+ If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m".
+ Based on your system, restart the k3s service. For example,
+ systemctl restart k3s.service
+ scored: false
+
+ - id: 4.2.6
+ text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ test_items:
+ - flag: --protect-kernel-defaults
+ path: '{.protectKernelDefaults}'
+ compare:
+ op: eq
+ value: true
+ set: true
+ remediation: |
+ If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+ protect-kernel-defaults: true
+ If using the command line, run K3s with --protect-kernel-defaults=true.
+ Based on your system, restart the k3s service. For example,
+ systemctl restart k3s.service
+ scored: true
+
+ - id: 4.2.7
+ text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ test_items:
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ compare:
+ op: eq
+ value: true
+ set: true
+ - flag: --make-iptables-util-chains
+ path: '{.makeIPTablesUtilChains}'
+ set: false
+ bin_op: or
+ remediation: |
+ If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+ kubelet-arg:
+ - "make-iptables-util-chains=true"
+ If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true".
+ Based on your system, restart the k3s service. For example,
+ systemctl restart k3s.service
+ scored: true
+
+ - id: 4.2.8
+ text: "Ensure that the --hostname-override argument is not set (Automated)"
+ type: "skip"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ test_items:
+ - flag: --hostname-override
+ set: false
+ remediation: |
+ Not Applicable.
+ By default, K3s does set the --hostname-override argument, so this check is skipped. Per CIS guidelines, this is to comply
+ with cloud providers that require this flag so that hostnames match node names.
+ scored: true
+
+ - id: 4.2.9
+ text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ audit_config: "/bin/cat $kubeletconf"
+ tests:
+ test_items:
+ - flag: --event-qps
+ path: '{.eventRecordQPS}'
+ compare:
+ op: eq
+ value: 0
+ remediation: |
+ By default, K3s sets the event-qps to 0. Should you wish to change this,
+ if using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+ kubelet-arg:
+ - "event-qps="
+ If using the command line, run K3s with --kubelet-arg="event-qps=".
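+ An illustrative value (an assumption; tune it to your expected event volume) would be:
+ kubelet-arg:
+ - "event-qps=5"
+ Note that a value of 0 means event creation is not rate limited; this captures every event but can consume more resources.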
+ Based on your system, restart the k3s service. For example,
+ systemctl restart k3s.service
+ scored: false
+
+ - id: 4.2.10
+ text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ test_items:
+ - flag: --tls-cert-file
+ path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt'
+ - flag: --tls-private-key-file
+ path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
+ remediation: |
+ By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+ They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key
+ If for some reason you need to provide your own certificate and key, you can set the
+ below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+ kubelet-arg:
+ - "tls-cert-file="
+ - "tls-private-key-file="
+ scored: true
+
+ - id: 4.2.11
+ text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+ tests:
+ test_items:
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ compare:
+ op: eq
+ value: true
+ - flag: --rotate-certificates
+ path: '{.rotateCertificates}'
+ set: false
+ bin_op: or
+ remediation: |
+ By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+ If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+ If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
+ Based on your system, restart the k3s service. For example,
+ systemctl restart k3s.service
+ scored: true
+
+ - id: 4.2.12
+ text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
+ audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+ tests:
+ bin_op: or
+ test_items:
+ - flag: RotateKubeletServerCertificate
+ path: '{.featureGates.RotateKubeletServerCertificate}'
+ compare:
+ op: nothave
+ value: false
+ - flag: RotateKubeletServerCertificate
+ path: '{.featureGates.RotateKubeletServerCertificate}'
+ set: false
+ remediation: |
+ By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+ If you have set this feature gate to false, you should remove that setting.
+ If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gates=RotateKubeletServerCertificate=false parameter.
+ If using the command line, remove the K3s flag --kubelet-arg="feature-gates=RotateKubeletServerCertificate=false".
+ Based on your system, restart the k3s service.
For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tlsCipherSuites` to + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + or to a subset of these values. + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=" + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false diff --git a/cfg/k3s-cis-1.24/policies.yaml b/cfg/k3s-cis-1.24/policies.yaml new file mode 100644 index 000000000..4a4f9887b --- /dev/null +++ b/cfg/k3s-cis-1.24/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "k3s-cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. 
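+ For example (illustrative), in the Pod spec or ServiceAccount manifest set:
+ automountServiceAccountToken: false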
+ scored: false
+
+ - id: 5.1.7
+ text: "Avoid use of system:masters group (Manual)"
+ type: "manual"
+ remediation: |
+ Remove the system:masters group from all users in the cluster.
+ scored: false
+
+ - id: 5.1.8
+ text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+ type: "manual"
+ remediation: |
+ Where possible, remove the impersonate, bind and escalate rights from subjects.
+ scored: false
+
+ - id: 5.2
+ text: "Pod Security Standards"
+ checks:
+ - id: 5.2.1
+ text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+ type: "manual"
+ remediation: |
+ Ensure that either Pod Security Admission or an external policy control system is in place
+ for every namespace which contains user workloads.
+ scored: false
+
+ - id: 5.2.2
+ text: "Minimize the admission of privileged containers (Manual)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of privileged containers.
+ scored: false
+
+ - id: 5.2.3
+ text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostPID` containers.
+ scored: false
+
+ - id: 5.2.4
+ text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostIPC` containers.
+ scored: false
+
+ - id: 5.2.5
+ text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of `hostNetwork` containers.
+ scored: false
+
+ - id: 5.2.6
+ text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+ scored: false
+
+ - id: 5.2.7
+ text: "Minimize the admission of root containers (Automated)"
+ type: "manual"
+ remediation: |
+ Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+ or `MustRunAs` with the range of UIDs not including 0, is set.
+ scored: false
+
+ - id: 5.2.8
+ text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+ type: "manual"
+ remediation: |
+ Add policies to each namespace in the cluster which has user workloads to restrict the
+ admission of containers with the `NET_RAW` capability.
+ scored: false
+
+ - id: 5.2.9
+ text: "Minimize the admission of containers with added capabilities (Automated)"
+ type: "manual"
+ remediation: |
+ Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+ it is set to an empty array.
+ scored: false
+
+ - id: 5.2.10
+ text: "Minimize the admission of containers with capabilities assigned (Manual)"
+ type: "manual"
+ remediation: |
+ Review the use of capabilities in applications running on your cluster. Where a namespace
+ contains applications which do not require any Linux capabilities to operate, consider adding
+ a PSP which forbids the admission of containers which do not drop all capabilities.
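+ As an alternative to a PSP, enforcing the restricted Pod Security Standard on a namespace also requires containers to drop all capabilities, for example (illustrative):
+ kubectl label namespace [namespace] pod-security.kubernetes.io/enforce=restricted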
+ scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. 
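+ An illustrative (non-exhaustive) container-level example:
+ securityContext:
+ runAsNonRoot: true
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL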
+ scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/k3s-cis-1.7/config.yaml b/cfg/k3s-cis-1.7/config.yaml new file mode 100644 index 000000000..ed8124f08 --- /dev/null +++ b/cfg/k3s-cis-1.7/config.yaml @@ -0,0 +1,64 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - kubelet + - scheduler + - controllermanager + - etcd + - policies + + apiserver: + bins: + - containerd + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + scheduler: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + + controllermanager: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + + etcd: + bins: + - containerd + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.7/controlplane.yaml b/cfg/k3s-cis-1.7/controlplane.yaml new file mode 100644 index 000000000..b23a04a15 --- /dev/null +++ b/cfg/k3s-cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. 
Care should be taken to only
+ log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+ order to avoid risk of logging sensitive data.
+ - Modification of Pod and Deployment objects.
+ - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+ For most requests, minimally logging at the Metadata level is recommended
+ (the most basic level of logging).
+ scored: false
diff --git a/cfg/k3s-cis-1.7/etcd.yaml b/cfg/k3s-cis-1.7/etcd.yaml
new file mode 100644
index 000000000..004812013
--- /dev/null
+++ b/cfg/k3s-cis-1.7/etcd.yaml
@@ -0,0 +1,144 @@
+---
+controls:
+version: "k3s-cis-1.7"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+ - id: 2
+ text: "Etcd Node Configuration"
+ checks:
+ - id: 2.1
+ text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ bin_op: and
+ test_items:
+ - path: "{.client-transport-security.cert-file}"
+ compare:
+ op: eq
+ value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
+ - path: "{.client-transport-security.key-file}"
+ compare:
+ op: eq
+ value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s generates cert and key files for etcd.
+ These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+ If this check fails, ensure that the configuration file $etcdconf
+ has not been modified to use custom cert and key files.
+ scored: false
+
+ - id: 2.2
+ text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ test_items:
+ - path: "{.client-transport-security.client-cert-auth}"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
+ If this check fails, ensure that the configuration file $etcdconf
+ has not been modified to disable client certificate authentication.
+ scored: false
+
+ - id: 2.3
+ text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ bin_op: or
+ test_items:
+ - path: "{.client-transport-security.auto-tls}"
+ compare:
+ op: eq
+ value: false
+ - path: "{.client-transport-security.auto-tls}"
+ set: false
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s does not set the --auto-tls parameter.
+ If this check fails, edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --auto-tls parameter or set it to false.
+ client-transport-security:
+ auto-tls: false
+ scored: false
+
+ - id: 2.4
+ text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ bin_op: and
+ test_items:
+ - path: "{.peer-transport-security.cert-file}"
+ compare:
+ op: eq
+ value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
+ - path: "{.peer-transport-security.key-file}"
+ compare:
+ op: eq
+ value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s generates peer cert and key files for etcd.
+ These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+ If this check fails, ensure that the configuration file $etcdconf
+ has not been modified to use custom peer cert and key files.
+ scored: false
+
+ - id: 2.5
+ text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ test_items:
+ - path: "{.peer-transport-security.client-cert-auth}"
+ compare:
+ op: eq
+ value: true
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
+ If this check fails, ensure that the configuration file $etcdconf
+ has not been modified to disable peer client certificate authentication.
+ scored: false
+
+ - id: 2.6
+ text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ bin_op: or
+ test_items:
+ - path: "{.peer-transport-security.auto-tls}"
+ compare:
+ op: eq
+ value: false
+ - path: "{.peer-transport-security.auto-tls}"
+ set: false
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
+ If this check fails, edit the etcd pod specification file $etcdconf on the master
+ node and either remove the --peer-auto-tls parameter or set it to false.
+ peer-transport-security:
+ auto-tls: false
+ scored: false
+
+ - id: 2.7
+ text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+ audit_config: "cat $etcdconf"
+ tests:
+ test_items:
+ - path: "{.peer-transport-security.trusted-ca-file}"
+ compare:
+ op: eq
+ value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
+ remediation: |
+ If running with SQLite or an external DB, etcd checks are Not Applicable.
+ When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
+ This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
+ If this check fails, ensure that the configuration file $etcdconf
+ has not been modified to use a shared certificate authority.
+ scored: false
diff --git a/cfg/k3s-cis-1.7/master.yaml b/cfg/k3s-cis-1.7/master.yaml
new file mode 100644
index 000000000..9e2ef0880
--- /dev/null
+++ b/cfg/k3s-cis-1.7/master.yaml
@@ -0,0 +1,1000 @@
+---
+controls:
+version: "k3s-cis-1.7"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+ - id: 1.1
+ text: "Control Plane Node Configuration Files"
+ checks:
+ - id: 1.1.1
+ text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+ type: "skip"
+ audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+ tests:
+ test_items:
+ - flag: "permissions"
+ compare:
+ op: bitmask
+ value: "600"
+ remediation: |
+ Not Applicable.
+ By default, K3s embeds the API server within the k3s process. There is no API server pod specification file.
+ scored: true
+
+ - id: 1.1.2
+ text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+ type: "skip"
+ audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+ tests:
+ test_items:
+ - flag: "root:root"
+ remediation: |
+ Not Applicable.
+ By default, K3s embeds the API server within the k3s process. There is no API server pod specification file.
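+ The API server arguments can instead be inspected from the journal, for example:
+ journalctl -u k3s | grep 'Running kube-apiserver' | tail -n1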
+ scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + By default, K3s sets the CNI file permissions to 644. + Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. + If you modify your CNI configuration, ensure that the permissions are set to 600. 
+ For example, chmod 600 /var/lib/cni/networks/ + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/cni/networks/ + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 $etcddatadir + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $controllermanagerkubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth argument to false. If it is set to true, + edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. 
+          kube-apiserver-arg:
+            - "anonymous-auth=true"
+        scored: true
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+          kube-apiserver-arg:
+            - "token-auth-file="
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: "DenyServiceExternalIPs"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s does not set DenyServiceExternalIPs.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=DenyServiceExternalIPs"
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-client-certificate'"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          By default, K3s automatically provides the kubelet client certificate and key.
+          They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key.
+          If for some reason you need to provide your own certificate and key, you can set the
+          below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+          kube-apiserver-arg:
+            - "kubelet-client-certificate="
+            - "kubelet-client-key="
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+        remediation: |
+          By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "kubelet-certificate-authority="
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          By default, K3s does not set the --authorization-mode to AlwaysAllow.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+ kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." 
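+          As a quick manual check (illustrative only; it simply reuses the audit
+          command above), you can list the currently enabled plugins on a server node with:
+            journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -o 'enable-admission-plugins=[^ ]*'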
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          Not Applicable.
+          Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail.
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s does not set the --disable-admission-plugins to anything.
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=ServiceAccount"
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          By default, K3s does not set the --disable-admission-plugins to anything.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=...,NamespaceLifecycle,..."
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          By default, K3s sets the --enable-admission-plugins to NodeRestriction.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins.
+          If you are, include NodeRestriction in the list.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,NodeRestriction,..."
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          By default, K3s sets the secure port to 6444.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+ kube-apiserver-arg: + - "secure-port=" + scored: true + + - id: 1.2.17 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "profiling=true" + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. 
For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" + scored: true + + - id: 1.2.25 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" + scored: true + + - id: 1.2.26 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 1.2.27 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. 
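+          To inspect it (illustrative; any x509 tooling works), print the CA subject with:
+            openssl x509 -in /var/lib/rancher/k3s/server/tls/client-ca.crt -noout -subject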
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "client-ca-file="
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          By default, K3s automatically provides the etcd certificate authority file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-cafile="
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          K3s can be configured to use encryption providers to encrypt secrets at rest.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
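+          For illustration only (the shape of the file; the key name and secret below
+          are placeholders, not real values), an aescbc-based config that satisfies
+          the audit above looks like:
+            {"kind":"EncryptionConfiguration","apiVersion":"apiserver.config.k8s.io/v1",
+             "resources":[{"resources":["secrets"],"providers":[
+               {"aescbc":{"keys":[{"name":"aescbckey","secret":"..."}]}},{"identity":{}}]}]}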
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression;
+          therefore, ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+          If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+          If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
+          kube-apiserver-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
+          and set the --terminated-pod-gc-threshold to an appropriate threshold, for example,
+          kube-controller-manager-arg:
+            - "terminated-pod-gc-threshold=10"
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          By default, K3s sets the --use-service-account-credentials argument to true.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "use-service-account-credentials=false"
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          By default, K3s automatically provides the service account private key file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "service-account-private-key-file="
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          By default, K3s automatically provides the root CA file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "root-ca-file="
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have set this feature gate to false, you should remove that setting.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-controller-manager-arg:
+            - "feature-gates=RotateKubeletServerCertificate=false"
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "bind-address="
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "bind-address="
+        scored: true
diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml
new file mode 100644
index 000000000..b873aea68
--- /dev/null
+++ b/cfg/k3s-cis-1.7/node.yaml
@@ -0,0 +1,422 @@
+---
+controls:
+version: "k3s-cis-1.7"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: 'stat -c %U:%G $kubeletkubeconfig'
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a $kubeletcafile"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file: chmod 600 $kubeletcafile
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $kubeletcafile"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root $kubeletcafile
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet config file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you
+          should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+ kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. 
For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Not Applicable. + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + By default, K3s sets the event-qps to 0. Should you wish to change this, + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. 
For example,
+          systemctl restart k3s.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt'
+            - flag: --tls-private-key-file
+              path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
+        remediation: |
+          By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+          They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
+          If for some reason you need to provide your own certificate and key, you can set
+          the below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+          kubelet-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have set this feature gate to false, you should remove that setting.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gates=RotateKubeletServerCertificate=false parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="feature-gates=RotateKubeletServerCertificate=false".
+          Based on your system, restart the k3s service.
For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tlsCipherSuites` to + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + or to a subset of these values. + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=" + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `podPidsLimit` to + kubelet-arg: + - "pod-max-pids=" + scored: false diff --git a/cfg/k3s-cis-1.7/policies.yaml b/cfg/k3s-cis-1.7/policies.yaml new file mode 100644 index 000000000..f4d4a24e8 --- /dev/null +++ b/cfg/k3s-cis-1.7/policies.yaml @@ -0,0 +1,300 @@ +--- +controls: +version: "k3s-cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. 
(Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. 
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: true
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
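+          As an illustrative starting point ("my-namespace" is a placeholder), a
+          default-deny ingress policy can be applied per namespace and then relaxed
+          with more specific policies for each workload:
+            apiVersion: networking.k8s.io/v1
+            kind: NetworkPolicy
+            metadata:
+              name: default-deny-ingress
+              namespace: my-namespace
+            spec:
+              podSelector: {}
+              policyTypes:
+                - Ingress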
+ scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
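+          For example (the name is hypothetical), create a dedicated namespace and deploy
+          workloads into it rather than into default:
+            apiVersion: v1
+            kind: Namespace
+            metadata:
+              name: my-app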
+ scored: false diff --git a/cfg/k3s-cis-1.8/config.yaml b/cfg/k3s-cis-1.8/config.yaml new file mode 100644 index 000000000..fff6edb7d --- /dev/null +++ b/cfg/k3s-cis-1.8/config.yaml @@ -0,0 +1,54 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +master: + components: + - apiserver + - kubelet + - scheduler + - controllermanager + - etcd + - policies + apiserver: + bins: + - containerd + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + scheduler: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig + controllermanager: + bins: + - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + etcd: + bins: + - containerd + +etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + kubelet: + bins: + - containerd + confs: + - /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.8/controlplane.yaml b/cfg/k3s-cis-1.8/controlplane.yaml new file mode 100644 index 000000000..d14070656 --- /dev/null +++ b/cfg/k3s-cis-1.8/controlplane.yaml @@ -0,0 +1,62 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: false + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. 
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/cfg/k3s-cis-1.8/etcd.yaml b/cfg/k3s-cis-1.8/etcd.yaml
new file mode 100644
index 000000000..4a39779cb
--- /dev/null
+++ b/cfg/k3s-cis-1.8/etcd.yaml
@@ -0,0 +1,144 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.client-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
+            - path: "{.client-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates cert and key files for etcd.
+          These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom cert and key files.
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.client-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable client certificate authentication.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.client-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+            - path: "{.client-transport-security.auto-tls}"
+              set: false
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s does not set the --auto-tls parameter.
+          If this check fails, edit the etcd configuration file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          client-transport-security:
+            auto-tls: false
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.peer-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
+            - path: "{.peer-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates peer cert and key files for etcd.
+          These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom peer cert and key files.
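+          For reference, the default peer-transport-security section of $etcdconf that the
+          tests above expect looks approximately like this (generated by K3s; shown here only
+          as a sketch):
+            peer-transport-security:
+              cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt
+              key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key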
+        scored: false
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.peer-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable peer client certificate authentication.
+        scored: false
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.peer-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+            - path: "{.peer-transport-security.auto-tls}"
+              set: false
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
+          If this check fails, edit the etcd configuration file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          peer-transport-security:
+            auto-tls: false
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.peer-transport-security.trusted-ca-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
+          This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use a shared certificate authority.
+        scored: false
diff --git a/cfg/k3s-cis-1.8/master.yaml b/cfg/k3s-cis-1.8/master.yaml
new file mode 100644
index 000000000..eb4799a17
--- /dev/null
+++ b/cfg/k3s-cis-1.8/master.yaml
@@ -0,0 +1,985 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Not Applicable.
+          By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
+ scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + By default, K3s sets the CNI file permissions to 600. + Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. + If you modify your CNI configuration, ensure that the permissions are set to 600. 
+ For example, chmod 600 /var/lib/cni/networks/ + scored: true + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/cni/networks/ + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $controllermanagerkubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. 
+ kube-apiserver-arg: + - "anonymous-auth=true" + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + By default, K3s automatically provides the kubelet client certificate and key. + They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "kubelet-certificate-authority=" + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. 
+ kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." 
+ scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Not Applicable. + Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + Follow the documentation and create ServiceAccount objects as per your environment. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." + scored: true + + - id: 1.2.16 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "profiling=true" + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false + + - id: 1.2.21 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false + + - id: 1.2.22 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so + that the default takes effect. 
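+          As a consolidated sketch (paths and values are illustrative), this setting and the
+          audit-log settings from 1.2.17-1.2.20 above can be combined in /etc/rancher/k3s/config.yaml:
+            kube-apiserver-arg:
+              - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
+              - "audit-log-maxage=30"
+              - "audit-log-maxbackup=10"
+              - "audit-log-maxsize=100"
+              - "service-account-lookup=true"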
+ scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" + scored: true + + - id: 1.2.24 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" + scored: true + + - id: 1.2.25 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 1.2.26 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "client-ca-file=" + scored: true + + - id: 1.2.27 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + By default, K3s automatically provides the etcd certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. 
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-cafile=" + scored: true + + - id: 1.2.28 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + K3s can be configured to use encryption providers to encrypt secrets at rest. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json. + scored: false + + - id: 1.2.29 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json + scored: false + + - id: 1.2.30 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. 
+ If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "profiling=true" + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + By default, K3s sets the --use-service-account-credentials argument to true. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "use-service-account-credentials=false" + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + By default, K3s automatically provides the service account private key file. + It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "service-account-private-key-file=" + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + By default, K3s automatically provides the root CA file. + It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. 
+ If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "root-ca-file=" + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-controller-manager-arg: + - "feature-gate=RotateKubeletServerCertificate" + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "bind-address=" + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "profiling=true" + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + By default, K3s sets the --bind-address argument to 127.0.0.1 + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-scheduler-arg: + - "bind-address=" + scored: true diff --git a/cfg/k3s-cis-1.8/node.yaml b/cfg/k3s-cis-1.8/node.yaml new file mode 100644 index 000000000..7a238a1a8 --- /dev/null +++ b/cfg/k3s-cis-1.8/node.yaml @@ -0,0 +1,422 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime. 
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: 'stat -c %U:%G $kubeletkubeconfig'
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a $kubeletcafile"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file.
+          chmod 600 $kubeletcafile
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $kubeletcafile"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root $kubeletcafile
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process.
There is no kubelet config file, all configuration is passed in as arguments at runtime. + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + type: "skip" + tests: + test_items: + - flag: root:root + remediation: | + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + By default, K3s automatically provides the client ca certificate for the Kubelet. 
+ It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. For example, + systemctl daemon-reload + systemctl restart k3s.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + type: "skip" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Not Applicable. + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. 
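+          For reference, a /etc/rancher/k3s/config.yaml fragment applying the kubelet settings
+          recommended in 4.2.5 and 4.2.6 above might look like this (values are illustrative):
+            kubelet-arg:
+              - "streaming-connection-idle-timeout=5m"
+              - "make-iptables-util-chains=true"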
+ scored: true + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + By default, K3s sets the event-qps to 0. Should you wish to change this and are using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + tests: + test_items: + - flag: --tls-cert-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt' + - flag: --tls-private-key-file + path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' + remediation: | + By default, K3s automatically provides the TLS certificate and private key for the Kubelet. + They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key. + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kubelet-arg: + - "tls-cert-file=" + - "tls-private-key-file=" + scored: true + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter. + If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + By default, K3s does not set the RotateKubeletServerCertificate feature gate. + If you have enabled this feature gate, you should remove it.
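+ An offending entry in the config file would look similar to the below (illustrative only; the exact spelling of the kubelet argument may differ on your system). + kubelet-arg: + - "feature-gates=RotateKubeletServerCertificate=true"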
+ If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter. + If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: true + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the kubelet TLS cipher suites as below + kubelet-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + or to a subset of these values. + If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites=". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it. + If using a K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the pod PID limit as below. + kubelet-arg: + - "pod-max-pids=" + scored: false diff --git a/cfg/k3s-cis-1.8/policies.yaml b/cfg/k3s-cis-1.8/policies.yaml new file mode 100644 index 000000000..29c65ef42 --- /dev/null +++ b/cfg/k3s-cis-1.8/policies.yaml @@ -0,0 +1,300 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible, replace any use of wildcards in clusterroles and roles with specific + objects or actions.
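+ As a starting point (a sketch; kubectl and jq on the assessor's machine are assumed), roles that grant wildcards can be listed with: + kubectl get clusterroles -o json | jq -r '.items[] | select(any(.rules[]?; any(.verbs[]?, .resources[]?, .apiGroups[]?; . == "*"))) | .metadata.name'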
+ scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value: + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects. + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers.
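+ For example (a sketch assuming Pod Security Admission is the control mechanism in use), labelling a namespace with + kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=baseline + causes pods that set hostPID to be rejected, since the baseline profile forbids host namespaces.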
+ scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: true + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with a range of UIDs not including 0 is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster.
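+ One way to verify support (a sketch; a scratch namespace and kubectl access are assumed) is to apply a default-deny policy and confirm traffic is actually blocked: + kubectl apply -n <test-namespace> -f - <<'EOF' + apiVersion: networking.k8s.io/v1 + kind: NetworkPolicy + metadata: + name: default-deny + spec: + podSelector: {} + policyTypes: ["Ingress", "Egress"] + EOF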
+ scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rh-1.0/controlplane.yaml b/cfg/rh-1.0/controlplane.yaml index 392226390..606194ddf 100644 --- a/cfg/rh-1.0/controlplane.yaml +++ b/cfg/rh-1.0/controlplane.yaml @@ -40,9 +40,9 @@ groups: #To view openshift apiserver log files oc adm node-logs --role=master --path=openshift-apiserver/ #To verify kube apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' #To verify openshift apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig[]?' type: manual remediation: | No remediation required. 
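+        The trailing `?` in `.auditConfig[]?` is what keeps these audits from aborting when auditConfig is absent; for example: + echo '{}' | jq '.auditConfig[]' # fails with "Cannot iterate over null" + echo '{}' | jq '.auditConfig[]?' # prints nothing and exits 0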
@@ -52,9 +52,9 @@ groups: text: "Ensure that the audit policy covers key security concerns (Manual)" audit: | #To verify openshift apiserver audit config - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' #To verify kube apiserver audit config - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' type: manual remediation: | In OpenShift 4.6 and higher, if appropriate for your needs, diff --git a/cfg/rh-1.0/etcd.yaml b/cfg/rh-1.0/etcd.yaml index 13cd3fdae..4398d9cc1 100644 --- a/cfg/rh-1.0/etcd.yaml +++ b/cfg/rh-1.0/etcd.yaml @@ -11,16 +11,17 @@ groups: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Manual)" audit: | - # For --cert-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' - done 2>/dev/null - # For --key-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Extract the --cert-file and --key-file flags from the etcd process command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--key-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -36,10 +37,16 @@ groups: - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Extract the --client-cert-auth flag from the etcd process command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--client-cert-auth=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -55,10 +62,16 @@ groups: text: "Ensure that the --auto-tls argument is not set to true (Manual)" audit: | # Returns 0 if found, 1 if not found - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
- done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Check whether --auto-tls=true appears on the etcd command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --auto-tls=true 2>/dev/null ; echo exit_code=$? + fi use_multiple_values: true tests: test_items: @@ -67,22 +80,23 @@ groups: op: eq value: "1" remediation: | - This setting is managed by the cluster etcd operator. No remediation required.e + This setting is managed by the cluster etcd operator. No remediation required. scored: false - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Manual)" audit: | - # For --peer-cert-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' - done 2>/dev/null - # For --peer-key-file - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Extract the --peer-cert-file and --peer-key-file flags from the etcd process command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-cert-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-key-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -97,10 +111,16 @@ groups: - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Extract the --peer-client-cert-auth flag from the etcd process command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-client-cert-auth=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: @@ -116,10 +136,16 @@ groups: text: "Ensure that the --peer-auto-tls argument is not set to true (Manual)" audit: | # Returns 0 if found, 1 if not found - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>&1>/dev/null ; echo exit_code=$?
- done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Check whether --peer-auto-tls=true appears on the etcd command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | grep -- --peer-auto-tls=true 2>/dev/null ; echo exit_code=$? + fi use_multiple_values: true tests: test_items: @@ -134,14 +160,17 @@ groups: - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" audit: | - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' - done 2>/dev/null - for i in $(oc get pods -oname -n openshift-etcd) - do - oc exec -n openshift-etcd -c etcd $i -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Extract the --trusted-ca-file and --peer-trusted-ca-file flags from the etcd process command line + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--trusted-ca-file=[^ ]*\).*/\1/' + oc exec -n openshift-etcd -c etcd "$POD_NAME" -- ps -o command= -C etcd | sed 's/.*\(--peer-trusted-ca-file=[^ ]*\).*/\1/' + fi use_multiple_values: true tests: test_items: diff --git a/cfg/rh-1.0/master.yaml b/cfg/rh-1.0/master.yaml index dfeb7ecb0..37b50f033 100644 --- a/cfg/rh-1.0/master.yaml +++ b/cfg/rh-1.0/master.yaml @@ -11,10 +11,18 @@ groups: - id: 1.1.1 text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) - do - oc exec -n openshift-kube-apiserver $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -29,11 +37,18 @@ groups: - id: 1.1.2 text: "Ensure that the API server pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver -o name ) - do - oc exec -n openshift-kube-apiserver $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-apiserver-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -45,10 +60,18 @@ groups: - id: 1.1.3 text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -63,11 +86,18 @@ groups: - id: 1.1.4 text: "Ensure that the controller manager pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-controller-manager -o name -l app=kube-controller-manager) - do - oc exec -n openshift-kube-controller-manager $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-controller-manager-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -79,10 +109,18 @@ groups: - id: 1.1.5 text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) - do - oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml; - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -97,11 +135,18 @@ groups: - id: 1.1.6 text: "Ensure that the scheduler pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name ) - do - oc exec -n openshift-kube-scheduler $i -- \ - stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/kube-scheduler-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -113,10 +158,18 @@ groups: - id: 1.1.7 text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) - do - oc rsh -n openshift-etcd $i stat -c "$i %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/manifests/etcd-pod.yaml + fi use_multiple_values: true tests: test_items: @@ -129,33 +182,66 @@ groups: scored: false - id: 1.1.8 - text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + text: "Ensure that the etcd pod specification file ownership is set to root:root (Manual)" audit: | - for i in $( oc get pods -n openshift-etcd -l app=etcd -o name | grep etcd ) - do - oc rsh -n openshift-etcd $i stat -c "$i %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc rsh -n openshift-etcd "$POD_NAME" stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/manifests/etcd-pod.yaml + fi use_multiple_values: true tests: test_items: - flag: "root:root" remediation: | No remediation required; file permissions are managed by the operator. - scored: true + scored: false - id: 1.1.9 text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') # For CNI multus - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \"$i %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c \"$POD_NAME %n permissions=%a\" /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi # For SDN pods - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null + fi + # For OVS pods - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n permissions=%a" {} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n permissions=%a" {} \; 2>/dev/null + fi use_multiple_values: true tests: test_items: @@ -170,17 +256,40 @@ groups: - id: 1.1.10 text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" audit: | + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') # For CNI multus - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/etc/cni/net.d/*.conf"; done 2>/dev/null - for i in $(oc get pods -n openshift-multus -l app=multus -oname); do oc exec -n openshift-multus $i -- /bin/bash -c "stat -c \""$i %n %U:%G\" /host/var/run/multus/cni/net.d/*.conf"; done 2>/dev/null + # Get the pod name in the openshift-multus namespace + POD_NAME=$(oc get pods -n openshift-multus -l app=multus --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/etc/cni/net.d/*.conf" 2>/dev/null + oc exec -n openshift-multus "$POD_NAME" -- /bin/bash -c "stat -c '$POD_NAME %n %U:%G' /host/var/run/multus/cni/net.d/*.conf" 2>/dev/null + fi # For SDN pods - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname); do oc exec -n openshift-sdn $i -- find /var/run/openshift-sdn -type f -exec stat -c "$i %n %U:%G"{} \;; done 2>/dev/null + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/lib/cni/networks/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openshift-sdn -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null + fi # For OVS pods in 4.5 - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /var/run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /etc/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - for i in $(oc get pods -n openshift-sdn -l app=ovs -oname); do oc exec -n openshift-sdn $i -- find /run/openvswitch -type f -exec stat -c "$i %n %U:%G" {} \;; done 2>/dev/null - # For OVS pods in 4.6 TBD + POD_NAME=$(oc get pods -n openshift-sdn -l app=ovs --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- find /var/run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /etc/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null + oc exec -n openshift-sdn "$POD_NAME" -- find /run/openvswitch -type f -exec stat -c "$POD_NAME %n %U:%G" {} \; 2>/dev/null + fi use_multiple_values: true tests: test_items: @@ -192,7 +301,18 @@ groups: - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n permissions=%a" /var/lib/etcd/member; done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /var/lib/etcd/member + fi use_multiple_values: true tests: test_items: @@ -207,7 +327,18 @@ groups: - id: 1.1.12 text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Manual)" audit: | - for i in $(oc get pods -n openshift-etcd -l app=etcd -oname); do oc exec -n openshift-etcd -c etcd $i -- stat -c "$i %n %U:%G" /var/lib/etcd/member; done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-etcd namespace + POD_NAME=$(oc get pods -n openshift-etcd -l app=etcd --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-etcd "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /var/lib/etcd/member + fi use_multiple_values: true tests: test_items: @@ -219,10 +350,8 @@ groups: - id: 1.1.13 text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get nodes -o name) - do - oc debug $i -- chroot /host stat -c "$i %n permissions=%a" /etc/kubernetes/kubeconfig - done 2>/dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -237,10 +366,8 @@ groups: - id: 1.1.14 text: "Ensure that the admin.conf file ownership is set to root:root (Manual)" audit: | - for i in $(oc get nodes -o name) - do - oc debug $i -- chroot /host stat -c "$i %n %U:%G" /etc/kubernetes/kubeconfig - done 2>/dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -252,10 +379,18 @@ groups: - id: 1.1.15 text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler -o name) - do - oc exec -n openshift-kube-scheduler $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-scheduler namespace + POD_NAME=$(oc get pods -n openshift-kube-scheduler -l app=openshift-kube-scheduler --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-kube-scheduler "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/scheduler-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -285,10 +428,18 @@ groups: - id: 1.1.17 text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n permissions=%a" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -303,10 +454,18 @@ groups: - id: 1.1.18 text: "Ensure that the controller-manager.conf file ownership is set to root:root (Manual)" audit: | - for i in $(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager -o name) - do - oc exec -n openshift-kube-controller-manager $i -- stat -c "$i %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig - done 2>/dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-controller-manager namespace + POD_NAME=$(oc get pods -n openshift-kube-controller-manager -l app=kube-controller-manager --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." 
+ else + # Execute the stat command + oc exec -n openshift-kube-controller-manager "$POD_NAME" -- stat -c "$POD_NAME %n %U:%G" /etc/kubernetes/static-pod-resources/configmaps/controller-manager-kubeconfig/kubeconfig + fi use_multiple_values: true tests: test_items: @@ -316,34 +475,48 @@ groups: scored: false - id: 1.1.19 - text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Manual)" audit: | # Should return root:root for all files and directories - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - # echo $i static-pod-resources - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$i %n %U:%G" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # static-pod-certs + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \; + # static-pod-resources + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type d -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \; + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-resources -type f -wholename '*/secrets*' -exec stat -c "$POD_NAME %n %U:%G" {} \; + fi use_multiple_values: true tests: test_items: - flag: "root:root" remediation: | No remediation required; file permissions are managed by the operator.
- scored: true + scored: false - id: 1.1.20 text: "Ensure that the OpenShift PKI certificate file permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$i %n permissions=%a" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.crt' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi use_multiple_values: true tests: test_items: @@ -358,11 +531,18 @@ groups: - id: 1.1.21 text: "Ensure that the OpenShift PKI key file permissions are set to 600 (Manual)" audit: | - for i in $(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[*].metadata.name}') - do - # echo $i static-pod-certs - oc exec -n openshift-kube-apiserver $i -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$i %n permissions=%a" {} \; - done + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + + # Get the pod name in the openshift-kube-apiserver namespace + POD_NAME=$(oc get pods -n openshift-kube-apiserver -l app=openshift-kube-apiserver --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-kube-apiserver "$POD_NAME" -c kube-apiserver -- find /etc/kubernetes/static-pod-certs -type f -wholename '*/secrets/*.key' -exec stat -c "$POD_NAME %n permissions=%a" {} \; + fi use_multiple_values: true tests: test_items: @@ -381,9 +561,9 @@ groups: text: "Ensure that anonymous requests are authorized (Manual)" audit: | # To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]' + oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?' 
# To verify that userGroups include system:unauthenticated - oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[].userGroups' + oc get configmap config -n openshift-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.auditConfig.policyConfiguration.rules[]?.userGroups' # To verify RBAC is enabled oc get clusterrolebinding oc get clusterrole @@ -532,11 +712,9 @@ groups: oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.apiServerArguments' oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.apiServerArguments' # For OCP 4.5 and earlier verify that authorization-mode is not used - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode - oc debug node/${node} -- chroot /host ps -aux | grep kubelet | grep authorization-mode - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host cat /etc/kubernetes/kubelet.conf | grep authorization-mode 2> /dev/null + oc debug node/$NODE_NAME -- chroot /host ps -aux | grep kubelet | grep authorization-mode 2> /dev/null #Check that no overrides are configured oc get kubeapiservers.operator.openshift.io cluster -o json | jq -r '.spec.unsupportedConfigOverrides' audit_config: | @@ -864,7 +1042,6 @@ groups: remediation: | Follow the documentation for log forwarding. Forwarding logs to third party systems https://docs.openshift.com/container-platform/4.5/logging/cluster-logging-external.html - scored: false - id: 1.2.24 @@ -1070,6 +1247,12 @@ groups: - id: 1.2.35 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" type: manual + audit: | + # verify cipher suites + oc get cm -n openshift-authentication v4-0-config-system-cliconfig -o jsonpath='{.data.v4\-0\-config\-system\-cliconfig}' | jq .servingInfo + oc get kubeapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo' + oc get openshiftapiservers.operator.openshift.io cluster -o json | jq '.spec.observedConfig.servingInfo' + oc describe --namespace=openshift-ingress-operator ingresscontroller/default remediation: | Verify that the tlsSecurityProfile is set to the value you chose.
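+ For example (a sketch; substitute the profile your policy requires), the default Ingress Controller can be switched to the Intermediate profile with: + oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p '{"spec":{"tlsSecurityProfile":{"type":"Intermediate","intermediate":{}}}}'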
Note: The HAProxy Ingress controller image does not support TLS 1.3 diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml index 80dbd68b9..fb5a47324 100644 --- a/cfg/rh-1.0/node.yaml +++ b/cfg/rh-1.0/node.yaml @@ -11,11 +11,8 @@ groups: - id: 4.1.1 text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/systemd/system/kubelet.service - done 2> /dev/null - use_multiple_values: true + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/systemd/system/kubelet.service 2> /dev/null tests: test_items: - flag: "permissions" @@ -30,11 +27,8 @@ text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" audit: | # Should return root:root for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/systemd/system/kubelet.service - done 2> /dev/null - use_multiple_values: true + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/systemd/system/kubelet.service 2> /dev/null tests: test_items: - flag: root:root @@ -45,11 +39,17 @@ - id: 4.1.3 text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" audit: | - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) - do - oc exec -n openshift-sdn $i -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml - done 2> /dev/null - use_multiple_values: true + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node." + else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null + fi tests: bin_op: or test_items: @@ -65,10 +65,17 @@ - id: 4.1.4 text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" audit: | - for i in $(oc get pods -n openshift-sdn -l app=sdn -oname) - do - oc exec -n openshift-sdn $i -- stat -Lc "$i %n %U:%G" /config/kube-proxy-config.yaml - done 2> /dev/null + # Get the node name where the pod is running + NODE_NAME=$(oc get pod "$HOSTNAME" -o=jsonpath='{.spec.nodeName}') + # Get the pod name in the openshift-sdn namespace + POD_NAME=$(oc get pods -n openshift-sdn -l app=sdn --field-selector spec.nodeName="$NODE_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) + + if [ -z "$POD_NAME" ]; then + echo "No matching pods found on the current node."
+ else + # Execute the stat command + oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$POD_NAME %n %U:%G" /config/kube-proxy-config.yaml 2>/dev/null + fi use_multiple_values: true tests: bin_op: or @@ -82,10 +89,8 @@ text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Manual)" audit: | # Check permissions - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet.conf - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -100,10 +105,8 @@ - id: 4.1.6 text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet.conf - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -115,10 +118,8 @@ - id: 4.1.7 text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /etc/kubernetes/kubelet-ca.crt - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -133,10 +134,8 @@ - id: 4.1.8 text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /etc/kubernetes/kubelet-ca.crt - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n %U:%G" /etc/kubernetes/kubelet-ca.crt 2> /dev/null use_multiple_values: true tests: test_items: @@ -148,10 +147,8 @@ - id: 4.1.9 text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n permissions=%a" /var/lib/kubelet/kubeconfig - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host stat -c "$NODE_NAME %n permissions=%a" /var/lib/kubelet/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -166,10 +163,8 @@ - id: 4.1.10 text: "Ensure that the kubelet configuration file ownership is set to root:root (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host stat -c "$node %n %U:%G" /var/lib/kubelet/kubeconfig - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME
-- chroot /host stat -c "$NODE_NAME %n %U:%G" /var/lib/kubelet/kubeconfig 2> /dev/null use_multiple_values: true tests: test_items: @@ -184,10 +179,8 @@ groups: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep -B4 -A1 anonymous: /etc/kubernetes/kubelet.conf - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep -B4 -A1 anonymous /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: @@ -199,38 +192,31 @@ groups: scored: true - id: 4.2.2 - text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Manual)" type: manual # Takes a lot of time for connection to fail and audit: | POD=$(oc -n openshift-kube-apiserver get pod -l app=openshift-kube-apiserver -o jsonpath='{.items[0].metadata.name}') TOKEN=$(oc whoami -t) - for name in $(oc get nodes -ojsonpath='{.items[*].metadata.name}') - do - oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$name/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc exec -n openshift-kube-apiserver $POD -- curl -sS https://172.25.0.1/api/v1/nodes/$NODE_NAME/proxy/configz -k -H "Authorization:Bearer $TOKEN" | jq -r '.kubeletconfig.authorization.mode' 2> /dev/null use_multiple_values: true tests: test_items: - flag: "Connection timed out" remediation: | None required. Unauthenticated/Unauthorized users have no access to OpenShift nodes. - scored: true + scored: false - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep clientCAFile: /etc/kubernetes/kubelet.conf - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host grep clientCAFile /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: test_items: - - flag: "clientCAFile" - compare: - op: eq - value: "/etc/kubernetes/kubelet-ca.crt" + - flag: '"clientCAFile": "/etc/kubernetes/kubelet-ca.crt"' remediation: | None required. Changing the clientCAFile value is unsupported. scored: true @@ -258,18 +244,13 @@ groups: - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" audit: | - # Should return 1 for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout - echo exit_code=$? - done 2>/dev/null - # Should return 1 for each node - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf - echo exit_code=$? - done 2>/dev/null + # Should return 1 for node + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host ps -ef | grep kubelet | grep streaming-connection-idle-timeout 2> /dev/null + echo exit_code=$? 
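# (Explanatory note on the 4.2.5 audit, added for clarity rather than taken from the
# change: grep exits 1 when nothing matches, so "exit_code=1" is the expected, passing
# result here; any match is instead compared against 0 / 0s by the test_items below.)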
+ # Should return 1 for node + oc debug node/${NODE_NAME} -- chroot /host grep streamingConnectionIdleTimeout /etc/kubernetes/kubelet.conf 2> /dev/null + echo exit_code=$? use_multiple_values: true tests: bin_op: or @@ -278,6 +259,10 @@ compare: op: noteq value: 0 + - flag: streamingConnectionIdleTimeout + compare: + op: noteq + value: 0s - flag: "exit_code" compare: op: eq @@ -290,10 +275,8 @@ - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is not set (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf; - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/$NODE_NAME -- chroot /host more /etc/kubernetes/kubelet.conf 2> /dev/null tests: test_items: - flag: protectKernelDefaults @@ -346,12 +329,10 @@ scored: false - id: 4.2.9 - text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Automated)" + text: "Ensure that the kubeAPIQPS [--event-qps] argument is set to 0 or a level which ensures appropriate event capture (Manual)" audit: | - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host more /etc/kubernetes/kubelet.conf; - done + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf; oc get machineconfig 01-worker-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 oc get machineconfig 01-master-kubelet -o yaml | grep --color kubeAPIQPS%3A%2050 type: "manual" @@ -359,12 +340,17 @@ Follow the documentation to edit kubelet parameters https://docs.openshift.com/container-platform/4.5/scalability_and_performance/recommended-host-practices.html#create-a-kubeletconfig-crd-to-edit-kubelet-parameters KubeAPIQPS: - scored: true + scored: false - id: 4.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" audit: | - oc get configmap config -n openshift-kube-apiserver -ojson | jq -r '.data["config.yaml"]' | jq '.kubeletClientInfo' + oc get configmap config -n openshift-kube-apiserver -o json \ + | jq -r '.data["config.yaml"]' \ + | jq -r '.apiServerArguments | + .["kubelet-client-certificate"][0], + .["kubelet-client-key"][0] + ' tests: bin_op: and test_items: @@ -379,15 +365,10 @@ text: "Ensure that the --rotate-certificates argument is not set to false (Manual)" audit: | #Verify the rotateKubeletClientCertificate feature gate is not set to false - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host cat /etc/kubernetes/kubelet.conf | grep RotateKubeletClientCertificate 2> /dev/null # Verify the rotateCertificates argument is set to true - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + oc debug node/${NODE_NAME} -- chroot /host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: bin_op: or @@ -410,24 +391,19 @@ text: "Verify that the RotateKubeletServerCertificate argument is set to true
(Manual)" audit: | #Verify the rotateKubeletServerCertificate feature gate is on - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); - do - oc debug node/${node} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + NODE_NAME=$(oc get pod $HOSTNAME -o=jsonpath='{.spec.nodeName}') + oc debug node/${NODE_NAME} -- chroot /host grep RotateKubeletServerCertificate /etc/kubernetes/kubelet.conf 2> /dev/null # Verify the rotateCertificates argument is set to true - for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}') - do - oc debug node/${node} -- chroot host grep rotate /etc/kubernetes/kubelet.conf; - done 2> /dev/null + oc debug node/${NODE_NAME} -- chroot host grep rotate /etc/kubernetes/kubelet.conf 2> /dev/null use_multiple_values: true tests: bin_op: or test_items: - - flag: RotateKubeletServerCertificate + - flag: rotateCertificates compare: op: eq value: true - - flag: rotateCertificates + - flag: RotateKubeletServerCertificate compare: op: eq value: true diff --git a/cfg/rh-1.0/policies.yaml b/cfg/rh-1.0/policies.yaml index 2a629b422..e90cd877f 100644 --- a/cfg/rh-1.0/policies.yaml +++ b/cfg/rh-1.0/policies.yaml @@ -11,6 +11,12 @@ groups: - id: 5.1.1 text: "Ensure that the cluster-admin role is only used where required (Manual)" type: "manual" + audit: | + #To get a list of users and service accounts with the cluster-admin role + oc get clusterrolebindings -o=customcolumns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].kind | + grep cluster-admin + #To verity that kbueadmin is removed, no results should be returned + oc get secrets kubeadmin -n kube-system remediation: | Identify all clusterrolebindings to the cluster-admin role. Check if they are used and if they need this role or if they could use a role with fewer privileges. @@ -29,6 +35,15 @@ groups: - id: 5.1.3 text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" type: "manual" + audit: | + #needs verification + oc get roles --all-namespaces -o yaml + for i in $(oc get roles -A -o jsonpath='{.items[*].metadata.name}'); do oc + describe clusterrole ${i}; done + #Retrieve the cluster roles defined in the cluster and review for wildcards + oc get clusterroles -o yaml + for i in $(oc get clusterroles -o jsonpath='{.items[*].metadata.name}'); do + oc describe clusterrole ${i}; done remediation: | Where possible replace any use of wildcards in clusterroles and roles with specific objects or actions. 
@@ -63,10 +78,7 @@ groups: text: "Minimize the admission of privileged containers (Manual)" audit: | # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Privileged"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegedContainer:.allowPrivilegedContainer tests: test_items: - flag: "false" @@ -78,10 +90,7 @@ groups: - id: 5.2.2 text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host PID"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostPID:.allowHostPID tests: test_items: - flag: "false" @@ -93,10 +102,7 @@ groups: - id: 5.2.3 text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host IPC"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostIPC:.allowHostIPC tests: test_items: - flag: "false" @@ -108,10 +114,7 @@ groups: - id: 5.2.4 text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Host Network"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowHostNetwork:.allowHostNetwork tests: test_items: - flag: "false" @@ -123,10 +126,7 @@ groups: - id: 5.2.5 text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" audit: | - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; oc describe scc $i | grep "Allow Privilege Escalation"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,allowPrivilegeEscalation:.allowPrivilegeEscalation tests: test_items: - flag: "false" @@ -138,18 +138,10 @@ groups: - id: 5.2.6 text: "Minimize the admission of root containers (Manual)" audit: | - # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "Run As User Strategy"; - done + # needs verification # | awk 'NR>1 {gsub("map\\[type:", "", $2); gsub("\\]$", "", $2); print $1 ":" $2}' + oc get scc -o=custom-columns=NAME:.metadata.name,runAsUser:.runAsUser.type #For SCCs with MustRunAs verify that the range of UIDs does not include 0 - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "\sUID"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,uidRangeMin:.runAsUser.uidRangeMin,uidRangeMax:.runAsUser.uidRangeMax tests: bin_op: or test_items: @@ -168,11 +160,7 @@ groups: text: "Minimize the admission of containers with the NET_RAW capability (Manual)" audit: | # needs verification - for i in `oc get scc --template '{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'`; - do - echo "$i"; - oc describe scc $i | grep "Required Drop Capabilities"; - done + oc get scc -o=custom-columns=NAME:.metadata.name,requiredDropCapabilities:.requiredDropCapabilities tests: bin_op: or test_items: @@ -213,6 +201,9 @@ groups: - id: 5.3.2 text: "Ensure that all Namespaces have Network Policies defined (Manual)" type: "manual" + 
audit: | + #Run the following command and review the NetworkPolicy objects created in the cluster. + oc get networkpolicy --all-namespaces remediation: | Follow the documentation and create NetworkPolicy objects as you need them. scored: false @@ -223,6 +214,10 @@ groups: - id: 5.4.1 text: "Prefer using secrets as files over secrets as environment variables (Manual)" type: "manual" + audit: | + #Run the following command to find references to objects which use environment variables defined from secrets. + oc get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} + {.metadata.name} {"\n"}{end}' -A remediation: | If possible, rewrite application code to read secrets from mounted secret files, rather than from environment variables. @@ -252,6 +247,10 @@ groups: - id: 5.7.1 text: "Create administrative boundaries between resources using namespaces (Manual)" type: "manual" + audit: | + #Run the following command and review the namespaces created in the cluster. + oc get namespaces + #Ensure that these namespaces are the ones you need and are adequately administered as per your requirements. remediation: | Follow the documentation and create namespaces for objects in your deployment as you need them. @@ -277,6 +276,11 @@ groups: - id: 5.7.4 text: "The default namespace should not be used (Manual)" type: "manual" + audit: | + #Run this command to list objects in default namespace + oc project default + oc get all + #The only entries there should be system managed resources such as the kubernetes and openshift service remediation: | Ensure that namespaces are created to allow for appropriate segregation of Kubernetes resources and that all new resources are created in a specific namespace. diff --git a/cfg/rke-cis-1.23/config.yaml b/cfg/rke-cis-1.23/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.23/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.23/controlplane.yaml b/cfg/rke-cis-1.23/controlplane.yaml new file mode 100644 index 000000000..4dab24671 --- /dev/null +++ b/cfg/rke-cis-1.23/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "rke-cis-1.23" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+ For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke-cis-1.23/etcd.yaml b/cfg/rke-cis-1.23/etcd.yaml new file mode 100644 index 000000000..f0325e02f --- /dev/null +++ b/cfg/rke-cis-1.23/etcd.yaml @@ -0,0 +1,149 @@ +--- +controls: +version: "rke-cis-1.23" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "--key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-client-cert-auth" + set: true + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: true diff --git a/cfg/rke-cis-1.23/master.yaml b/cfg/rke-cis-1.23/master.yaml new file mode 100644 index 000000000..bc2338215 --- /dev/null +++ b/cfg/rke-cis-1.23/master.yaml @@ -0,0 +1,1011 @@ +--- +controls: +version: "rke-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Clusters provisioned by RKE do not require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. 
+ All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: stat -c %a /node/var/lib/etcd + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + A cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. 
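# (Editorial note on the op: bitmask comparisons used throughout this file, based on
# kube-bench's documented semantics rather than on this diff: the check passes when the
# audited mode sets no bits outside the stated value, so 600 and 640 both satisfy a
# "644" check because 640 & ~644 == 0, while 664 fails on the extra group-write bit.)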
+ scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl" + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. 
+ scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. 
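# (Worked example for the three authorization-mode checks above, illustrative only: a
# process line containing --authorization-mode=Node,RBAC passes 1.2.7 (nothave
# AlwaysAllow) as well as 1.2.8 and 1.2.9 (has Node, has RBAC), since has/nothave do a
# substring comparison on the flag's value.)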
+ scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + set: true + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. 
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure a EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + Enabling encryption changes how data can be recovered as data is encrypted. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + type: "manual" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. 
--tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
--root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + Clusters provisioned by RKE handle certificate rotation directly through RKE. + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml new file mode 100644 index 000000000..a509ed743 --- /dev/null +++ b/cfg/rke-cis-1.23/node.yaml @@ -0,0 +1,466 @@ +--- +controls: +version: "rke-cis-1.23" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet service.
All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $proxykubeconfig + scored: true + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: true + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e /node$kubeletkubeconfig; then stat -c permissions=%a /node$kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 644 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e /node$kubeletkubeconfig; then stat -c %U:%G /node$kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the --client-ca-file. + chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: true + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time.
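+      # Editor's note on the `bitmask` compare used above (a sketch of my reading of
+      # the operator, not benchmark text): the check passes when the audited mode's
+      # bits are a subset of the stated value, which is how "644 or more restrictive"
+      # is encoded. For example:
+      #   stat -c permissions=%a $proxykubeconfig    # e.g. permissions=600
+      #   # 0600 & 0644 == 0600 -> pass;  0640 & 0644 == 0640 -> pass
+      #   # 0664 & 0644 == 0644 != 0664 -> fail (group-writable is less restrictive)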
scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + type: "skip" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Clusters provisioned by RKE don’t require or maintain a configuration file for the kubelet. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + System level configurations are required prior to provisioning the cluster in order for this argument to be set to true. + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. 
If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors. + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --event-qps=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
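+      # Editor's note: for non-RKE setups that do drive the kubelet from a config
+      # file, the JSONPaths used throughout section 4.2 map onto a KubeletConfiguration
+      # like this sketch (file paths and values are illustrative; RKE instead passes
+      # these as command-line arguments at container run time):
+      #   apiVersion: kubelet.config.k8s.io/v1beta1
+      #   kind: KubeletConfiguration
+      #   authentication:
+      #     anonymous:
+      #       enabled: false                                  # 4.2.1
+      #     x509:
+      #       clientCAFile: /etc/kubernetes/ssl/kube-ca.pem   # 4.2.3
+      #   authorization:
+      #     mode: Webhook                                     # 4.2.2
+      #   readOnlyPort: 0                                     # 4.2.4
+      #   streamingConnectionIdleTimeout: 5m                  # 4.2.5
+      #   protectKernelDefaults: true                         # 4.2.6
+      #   makeIPTablesUtilChains: true                        # 4.2.7
+      #   eventRecordQPS: 0                                   # 4.2.9
+      #   tlsCertFile: /etc/kubernetes/ssl/kubelet.pem        # 4.2.10 (assumed path)
+      #   tlsPrivateKeyFile: /etc/kubernetes/ssl/kubelet-key.pem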
scored: false + + - id: 4.2.11 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true`, or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Clusters provisioned by RKE handle certificate rotation directly through RKE. + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values.
--tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true diff --git a/cfg/rke-cis-1.23/policies.yaml b/cfg/rke-cis-1.23/policies.yaml new file mode 100644 index 000000000..5e4c6d640 --- /dev/null +++ b/cfg/rke-cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "rke-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads.
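+      # Editor's note: with Pod Security Admission this is typically a per-namespace
+      # label, e.g. (namespace name assumed):
+      #   kubectl label namespace my-app pod-security.kubernetes.io/enforce=restricted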
scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes.
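+      # Editor's note: the 5.2.x checks above gate Pod spec fields like the following
+      # fragment, which a `restricted` policy would reject (illustrative only):
+      #   spec:
+      #     hostPID: true                          # 5.2.3
+      #     hostIPC: true                          # 5.2.4
+      #     hostNetwork: true                      # 5.2.5
+      #     containers:
+      #       - name: app
+      #         securityContext:
+      #           privileged: true                 # 5.2.2
+      #           allowPrivilegeEscalation: true   # 5.2.6
+      #     volumes:
+      #       - name: host-root
+      #         hostPath:
+      #           path: /                          # 5.2.12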
scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "skip" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "skip" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace.
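+      # Editor's note: e.g. create a dedicated namespace and point the current
+      # kubeconfig context at it instead of `default` (names assumed):
+      #   kubectl create namespace team-a
+      #   kubectl config set-context --current --namespace=team-a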
+ scored: false diff --git a/cfg/rke-cis-1.24/config.yaml b/cfg/rke-cis-1.24/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.24/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.24/controlplane.yaml b/cfg/rke-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..227bcb221 --- /dev/null +++ b/cfg/rke-cis-1.24/controlplane.yaml @@ -0,0 +1,46 @@ +--- +controls: +version: "rke-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke-cis-1.24/etcd.yaml b/cfg/rke-cis-1.24/etcd.yaml new file mode 100644 index 000000000..835e43c41 --- /dev/null +++ b/cfg/rke-cis-1.24/etcd.yaml @@ -0,0 +1,149 @@ +--- +controls: +version: "rke-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + set: true + - flag: "--key-file" + env: "ETCD_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--client-cert-auth" + set: true + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. 
--client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-client-cert-auth" + set: true + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter.
--trusted-ca-file= + scored: true diff --git a/cfg/rke-cis-1.24/master.yaml b/cfg/rke-cis-1.24/master.yaml new file mode 100644 index 000000000..c08d6e57b --- /dev/null +++ b/cfg/rke-cis-1.24/master.yaml @@ -0,0 +1,1014 @@ +--- +controls: +version: "rke-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf;else echo \"File not found\"; fi'" + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + - flag: "File not found" + remediation: | + Clusters provisioned by RKE don't require or maintain a configuration file for kube-apiserver. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: stat -c %a /node/var/lib/etcd + tests: + test_items: + - flag: "700" + compare: + op: eq + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). 
+ For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl" + tests: + test_items: + - flag: "true" + compare: + op: eq + value: "true" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)" + audit: | + if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a;else echo "File not found"; fi + tests: + bin_op: or + test_items: + - flag: "File not found" + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} + + scored: true + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: | + if test -n "$(find /node/etc/kubernetes/ssl/ -name '*.pem')"; then find /node/etc/kubernetes/ssl/ -name '*.pem' | xargs stat -c permissions=%a;else echo \"File not found\"; fi + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} + + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and remove the --kubelet-https parameter. 
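+      # Editor's note: the 1.2.x audits above all grep the live kube-apiserver command
+      # line; a sketch of the kind of output the test_items are matched against
+      # (flags and values illustrative):
+      #   root  1234  ...  kube-apiserver --anonymous-auth=false
+      #     --authorization-mode=Node,RBAC --profiling=false ...
+      # `set: true`/`set: false` asserts the flag's presence or absence in that line,
+      # and `compare` parses the value after `=`.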
scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters.
+ --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + set: true + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. 
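+      # Editor's note: for check 1.2.10 above, an illustrative pair of files
+      # (paths and limits assumed, not prescribed by the benchmark):
+      #   /etc/kubernetes/admission.yaml:
+      #     apiVersion: apiserver.config.k8s.io/v1
+      #     kind: AdmissionConfiguration
+      #     plugins:
+      #       - name: EventRateLimit
+      #         path: /etc/kubernetes/eventconfig.yaml
+      #   /etc/kubernetes/eventconfig.yaml:
+      #     apiVersion: eventratelimit.admission.k8s.io/v1alpha1
+      #     kind: Configuration
+      #     limits:
+      #       - type: Server
+      #         qps: 50
+      #         burst: 100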
+ scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. 
For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. 
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+        scored: true
+
+      - id: 1.2.31
+        text: "Ensure that encryption providers are appropriately configured (Automated)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+          Enabling encryption changes how data can be recovered as data is encrypted.
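+        # For reference, a minimal EncryptionConfiguration satisfying 1.2.30/1.2.31
+        # (an illustrative sketch, not part of the benchmark; the key below is a
+        # placeholder -- generate your own, e.g. `head -c 32 /dev/urandom | base64`):
+        #   apiVersion: apiserver.config.k8s.io/v1
+        #   kind: EncryptionConfiguration
+        #   resources:
+        #     - resources:
+        #         - secrets
+        #       providers:
+        #         - aescbc:
+        #             keys:
+        #               - name: key1
+        #                 secret: <base64-encoded-32-byte-key>
+        #         - identity: {}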
+ scored: true + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter. + --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: true + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: true + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. 
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Clusters provisioned by RKE handle certificate rotation directly through RKE.
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml
new file mode 100644
index 000000000..653f1b754
--- /dev/null
+++ b/cfg/rke-cis-1.24/node.yaml
@@ -0,0 +1,459 @@
+---
+controls:
+version: "rke-cis-1.24"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+            - flag: "File not found"
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file.
+          chmod 600
+        scored: true
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c "if test -e /node/etc/kubernetes/ssl/kube-ca.pem; then stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem; else echo \"File not found\"; fi"'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+            - flag: "File not found"
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root
+        scored: true
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Cluster provisioned by RKE doesn't require or maintain a configuration file for the kubelet.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Cluster provisioned by RKE doesn't require or maintain a configuration file for the kubelet.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --anonymous-auth=false
+          Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. 
For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        type: "skip"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          System level configurations are required prior to provisioning the cluster in order for this argument to be set to true.
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        type: "manual"
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors.
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service.
For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=
+          --tls-private-key-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
+        scored: true
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        type: "manual"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          Clusters provisioned by RKE handle certificate rotation directly through RKE.
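+        # For reference, the equivalent stanza in a kubelet config file would be
+        # (an illustrative sketch; it applies only where a config file is used
+        # rather than RKE's command line arguments):
+        #   featureGates:
+        #     RotateKubeletServerCertificate: true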
+ scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true diff --git a/cfg/rke-cis-1.24/policies.yaml b/cfg/rke-cis-1.24/policies.yaml new file mode 100644 index 000000000..2b8dba3e1 --- /dev/null +++ b/cfg/rke-cis-1.24/policies.yaml @@ -0,0 +1,327 @@ +--- +controls: +version: "rke-cis-1.24" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. 
+ Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostPID == null) or (.spec.hostPID == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: true + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostIPC == null) or (.spec.hostIPC == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: true + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + audit: | + kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.hostNetwork == null) or (.spec.hostNetwork == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}' + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: count + compare: + op: gt + value: 0 + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. 
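+        # Illustrative only: the audits for 5.2.3-5.2.6 count PodSecurityPolicies
+        # that leave hostPID/hostIPC/hostNetwork unset (defaulting to false) and
+        # disable privilege escalation. A minimal compliant sketch (hypothetical
+        # name; adjust the allowed volumes and user rules to your workloads):
+        #   apiVersion: policy/v1beta1
+        #   kind: PodSecurityPolicy
+        #   metadata:
+        #     name: restricted-example
+        #   spec:
+        #     privileged: false
+        #     allowPrivilegeEscalation: false
+        #     seLinux:
+        #       rule: RunAsAny
+        #     runAsUser:
+        #       rule: MustRunAsNonRoot
+        #     supplementalGroups:
+        #       rule: RunAsAny
+        #     fsGroup:
+        #       rule: RunAsAny
+        #     volumes: ["configMap", "secret", "emptyDir", "persistentVolumeClaim"]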
+        scored: true
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        audit: |
+          kubectl get psp -o json | jq .items[] | jq -r 'select((.spec.allowPrivilegeEscalation == null) or (.spec.allowPrivilegeEscalation == false))' | jq .metadata.name | wc -l | xargs -I {} echo '--count={}'
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "kubectl: not found"
+            - flag: "jq: not found"
+            - flag: count
+              compare:
+                op: gt
+                value: 0
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: true
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Manual)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "skip"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
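+        # A common starting point (a sketch, not prescribed by the benchmark): a
+        # default-deny-ingress policy per namespace, on top of which allow rules
+        # for specific workloads can be layered:
+        #   apiVersion: networking.k8s.io/v1
+        #   kind: NetworkPolicy
+        #   metadata:
+        #     name: default-deny-ingress
+        #     namespace: <target-namespace>
+        #   spec:
+        #     podSelector: {}
+        #     policyTypes:
+        #       - Ingress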
+ scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Automated)" + audit: | + #!/bin/bash + set -eE + handle_error() { + echo "false" + } + trap 'handle_error' ERR + count=$(kubectl get all -n default -o json | jq .items[] | jq -r 'select((.metadata.name!="kubernetes"))' | jq .metadata.name | wc -l) + if [[ ${count} -gt 0 ]]; then + echo "false" + exit + fi + echo "true" + tests: + bin_op: or + test_items: + - flag: "kubectl: not found" + - flag: "jq: not found" + - flag: "true" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: true diff --git a/cfg/rke-cis-1.7/config.yaml b/cfg/rke-cis-1.7/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke-cis-1.7/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke-cis-1.7/controlplane.yaml b/cfg/rke-cis-1.7/controlplane.yaml new file mode 100644 index 000000000..f3bf99172 --- /dev/null +++ b/cfg/rke-cis-1.7/controlplane.yaml @@ -0,0 +1,60 @@ +--- +controls: +version: "rke-cis-1.7" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. 
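+        # Sketch of the relevant kube-apiserver flags for an OIDC setup (the
+        # values are placeholders for your identity provider):
+        #   --oidc-issuer-url=https://issuer.example.com
+        #   --oidc-client-id=kubernetes
+        #   --oidc-username-claim=email
+        #   --oidc-groups-claim=groups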
+ scored: false + - id: 3.1.2 + text: "Service account token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of service account tokens. + scored: false + - id: 3.1.3 + text: "Bootstrap token authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented + in place of bootstrap tokens. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas, + - Access to Secrets managed by the cluster. Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke-cis-1.7/etcd.yaml b/cfg/rke-cis-1.7/etcd.yaml new file mode 100644 index 000000000..636ca906d --- /dev/null +++ b/cfg/rke-cis-1.7/etcd.yaml @@ -0,0 +1,136 @@ +--- +controls: +version: "rke-cis-1.7" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. 
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=
+          --peer-key-file=
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=
+        scored: true
diff --git a/cfg/rke-cis-1.7/master.yaml b/cfg/rke-cis-1.7/master.yaml
new file mode 100644
index 000000000..2fceed30b
--- /dev/null
+++ b/cfg/rke-cis-1.7/master.yaml
@@ -0,0 +1,998 @@
+---
+controls:
+version: "rke-cis-1.7"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 600 $apiserverconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-apiserver.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $controllermanagerconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-controller-manager.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $controllermanagerconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-controller-manager.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $schedulerconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-scheduler.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $schedulerconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for kube-scheduler.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $etcdconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $etcdconf
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for etcd.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
+        audit: stat -c %a /node/var/lib/etcd
+        tests:
+          test_items:
+            - flag: "700"
+              compare:
+                op: eq
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above).
For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /node/var/lib/etcd" + tests: + test_items: + - flag: "etcd:etcd" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + Permissive - A system service account is required for etcd data directory ownership. + Refer to Rancher's hardening guide for more details on how to configure this ownership. + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c permissions=%a /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e /etc/kubernetes/admin.conf; then stat -c %U:%G /etc/kubernetes/admin.conf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + Not Applicable - Cluster provisioned by RKE does not store the kubernetes default kubeconfig credentials file on the nodes. + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for scheduler. + All configuration is passed in as arguments at container run time. 
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $controllermanagerkubeconfig
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $controllermanagerkubeconfig
+          Not Applicable - Cluster provisioned by RKE doesn't require or maintain a configuration file for controller-manager.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: "check_files_owner_in_dir.sh /node/etc/kubernetes/ssl"
+        tests:
+          test_items:
+            - flag: "true"
+              compare:
+                op: eq
+                value: "true"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown -R root:root /etc/kubernetes/ssl/
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
+        audit: "find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          find /node/etc/kubernetes/ssl/ -name '*.pem' ! -name '*key.pem' -exec chmod -R 600 {} +
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
+        audit: "find /node/etc/kubernetes/ssl/ -name '*key.pem' | xargs stat -c permissions=%a"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          find /node/etc/kubernetes/ssl/ -name '*key.pem' -exec chmod -R 600 {} +
+        scored: false
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --anonymous-auth=false
+        scored: true
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the --token-auth-file= parameter.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: "DenyServiceExternalIPs"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the `DenyServiceExternalIPs`
+          from enabled admission plugins.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+            - flag: "--kubelet-client-key"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=
+          --kubelet-client-key=
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=
+          Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+ --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. 
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.22 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.23 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. 
For example,
+          --service-account-key-file=
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+          Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
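+          For reference, a minimal EncryptionConfiguration using the aescbc provider
+          could look like the sketch below (the resource list and key name are
+          illustrative, not values this check mandates):
+          apiVersion: apiserver.config.k8s.io/v1
+          kind: EncryptionConfiguration
+          resources:
+            - resources: ["secrets"]
+              providers:
+                - aescbc:
+                    keys:
+                      - name: key1
+                        secret: <base64-encoded 32-byte key>
+                - identity: {}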
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: "skip"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+          Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: true
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Cluster provisioned by RKE handles certificate rotation directly through RKE.
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter
+        scored: true
diff --git a/cfg/rke-cis-1.7/node.yaml b/cfg/rke-cis-1.7/node.yaml
new file mode 100644
index 000000000..abc9d12af
--- /dev/null
+++ b/cfg/rke-cis-1.7/node.yaml
@@ -0,0 +1,462 @@
+---
+controls:
+version: "rke-cis-1.7"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 600 $kubeletsvc
+          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet service.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet service.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /node/etc/kubernetes/ssl/kube-ca.pem"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file: chmod 600
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /node/etc/kubernetes/ssl/kube-ca.pem"
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root
+        scored: true
+
+      - id: 4.1.9
+        text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chmod 600 $kubeletconf
+          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet.
+          All configuration is passed in as arguments at container run time.
+        scored: true
+
+      - id: 4.1.10
+        text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command (using the config file location identified in the Audit step)
+          chown root:root $kubeletconf
+          Not Applicable - Clusters provisioned by RKE do not require or maintain a configuration file for the kubelet.
+          All configuration is passed in as arguments at container run time.
+        scored: false
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to
+          `false`.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          `--anonymous-auth=false`
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If
+          using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --authorization-mode=Webhook
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to
+          the location of the client CA file.
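+          As a sketch (the path below is borrowed from this benchmark's audit
+          commands and may differ on your nodes), the config file entry could look like:
+          authentication:
+            x509:
+              clientCAFile: /etc/kubernetes/ssl/kube-ca.pem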
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_AUTHZ_ARGS variable.
+          --client-ca-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
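+        # For illustration, one way to inspect this on a node is:
+        #   ps -ef | grep kubelet | grep -- --hostname-override
+        # (the process name "kubelet" is an assumption; RKE runs the kubelet in a container)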
+ type: "skip" + audit: "/bin/ps -fC $kubeletbin " + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable - Clusters provisioned by RKE set the --hostname-override to avoid any hostname configuration errors + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + type: "skip" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers. + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. 
+          For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        type: "skip"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          Not Applicable - Clusters provisioned by RKE handle certificate rotation directly through RKE.
+        scored: false
+
+      - id: 4.2.12
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.13
+        text: "Ensure that a limit is set on pod PIDs (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --pod-max-pids
+              path: '{.podPidsLimit}'
+        remediation: |
+          Decide on an appropriate level for this parameter and set it,
+          either via the --pod-max-pids command line parameter or the podPidsLimit configuration file setting.
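+          For illustration only (the limit below is an example value, not one this
+          check mandates), a kubelet config file entry could look like:
+          podPidsLimit: 4096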
+        scored: false
diff --git a/cfg/rke-cis-1.7/policies.yaml b/cfg/rke-cis-1.7/policies.yaml
new file mode 100644
index 000000000..207dd7808
--- /dev/null
+++ b/cfg/rke-cis-1.7/policies.yaml
@@ -0,0 +1,309 @@
+---
+controls:
+version: "rke-cis-1.7"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+      - id: 5.1.9
+        text: "Minimize access to create persistent volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to PersistentVolume objects in the cluster.
+        scored: false
+
+      - id: 5.1.10
+        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the proxy sub-resource of node objects.
+        scored: false
+
+      - id: 5.1.11
+        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
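+          As a sketch (the subject name "jane" is illustrative), you can check whether
+          a given subject holds this permission with:
+          kubectl auth can-i update certificatesigningrequests/approval --as=jane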
+ scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "skip" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. 
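+          For reference (assuming a PodSecurityPolicy-style policy object, which is an
+          assumption about your policy mechanism), that corresponds to omitting the
+          field entirely or setting:
+          allowedCapabilities: []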
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "skip"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+          Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is shown below:
+            securityContext:
+              seccompProfile:
+                type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "skip"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+          Permissive - Kubernetes provides a default namespace.
+        scored: false
diff --git a/cfg/rke2-cis-1.23/config.yaml b/cfg/rke2-cis-1.23/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/rke2-cis-1.23/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rke2-cis-1.23/controlplane.yaml b/cfg/rke2-cis-1.23/controlplane.yaml
new file mode 100644
index 000000000..95bccaf36
--- /dev/null
+++ b/cfg/rke2-cis-1.23/controlplane.yaml
@@ -0,0 +1,50 @@
+---
+controls:
+version: "rke2-cis-1.23"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Automated)"
+        audit: "/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file"
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              compare:
+                op: eq
+                value: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster.
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Review the audit policy provided for the cluster and ensure that it covers
+          at least the following areas:
+          - Access to Secrets managed by the cluster. Care should be taken to only
+            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+            order to avoid risk of logging sensitive data.
+          - Modification of Pod and Deployment objects.
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
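+          As a hedged illustration (the rule list is an assumption drawn from the areas above),
+          a minimal audit policy fragment that logs such access at the Metadata level only:
+            apiVersion: audit.k8s.io/v1
+            kind: Policy
+            rules:
+              - level: Metadata
+                resources:
+                  - group: ""
+                    resources: ["secrets", "configmaps"]
+                  - group: "authentication.k8s.io"
+                    resources: ["tokenreviews"]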
+        scored: false
diff --git a/cfg/rke2-cis-1.23/etcd.yaml b/cfg/rke2-cis-1.23/etcd.yaml
new file mode 100644
index 000000000..251bcd8c4
--- /dev/null
+++ b/cfg/rke2-cis-1.23/etcd.yaml
@@ -0,0 +1,150 @@
+---
+controls:
+version: "rke2-cis-1.23"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=
+          --key-file=
+        scored: true
+        type: "skip"
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        type: "skip"
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+              set: true
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=
+          --peer-key-file=
+        scored: true
+        type: "skip"
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + type: "skip" + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config" + tests: + bin_op: or + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/rke2-cis-1.23/master.yaml b/cfg/rke2-cis-1.23/master.yaml new file mode 100644 index 000000000..aeb766ab2 --- /dev/null +++ b/cfg/rke2-cis-1.23/master.yaml @@ -0,0 +1,1038 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 644 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod 644 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "644" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 644 /var/lib/rancher/rke2/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and setup the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. 
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... 
+ scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --service-account-key-file parameter
+          to the public key file for service accounts. For example,
+          --service-account-key-file=
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: true
+
+      - id: 1.2.30
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: "skip"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+          on the control plane node and set the below parameter.
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+        scored: false
+        type: skip
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+        scored: true
+        type: skip
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          Edit the Scheduler pod specification file $schedulerconf
+          on the control plane node and ensure the correct value for the --bind-address parameter.
+        scored: true
diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml
new file mode 100644
index 000000000..b7d97a0e3
--- /dev/null
+++ b/cfg/rke2-cis-1.23/node.yaml
@@ -0,0 +1,466 @@
+---
+controls:
+version: "rke2-cis-1.23"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chmod 644 $kubeletsvc
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletsvc
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              set: true
+              compare:
+                op: bitmask
+                value: "644"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $proxykubeconfig
+        scored: false
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: false
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "644"
+              compare:
+                op: eq
+                value: "644"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 644 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+              set: true
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)"
+        audit: "check_cafile_permissions.sh"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "644"
+              set: true
+        remediation: |
+          Run the following command to modify the file permissions of the
+          --client-ca-file chmod 644
+        scored: false
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
+        audit: "check_cafile_ownership.sh"
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 $kubeletconf + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + compare: + op: eq + value: true + set: true + - flag: --protect-kernel-defaults + path: '{.protectKernelDefaults}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. 
For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+        type: skip
+
+      - id: 4.2.9
+        text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=
+          --tls-private-key-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.12 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false diff --git a/cfg/rke2-cis-1.23/policies.yaml b/cfg/rke2-cis-1.23/policies.yaml new file mode 100644 index 000000000..87db5aa50 --- /dev/null +++ b/cfg/rke2-cis-1.23/policies.yaml @@ -0,0 +1,269 @@ +--- +controls: +version: "rke2-cis-1.23" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. 
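+          As an illustrative starting point (output columns may vary with your kubectl version),
+          the bindings can be enumerated with:
+          kubectl get clusterrolebindings -o wide | grep cluster-admin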
+ Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible, replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value: + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers.
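+ # Illustrative (not CIS text): with Pod Security Admission, the 5.2.x host-namespace checks + # can be addressed by labelling each user-workload namespace, for example: + # kubectl label ns <namespace> pod-security.kubernetes.io/enforce=baseline + # The baseline profile rejects hostPID, hostIPC, hostNetwork and privileged pods.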
+ scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Automated)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Automated)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables.
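+ # Illustrative (not CIS text): a Pod can consume a Secret as a file rather than an env var, e.g. + # volumes: [{name: creds, secret: {secretName: app-secret}}] + # containers[0].volumeMounts: [{name: creds, mountPath: /etc/creds, readOnly: true}] + # where `app-secret` and the mount path are placeholders.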
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is shown below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/rke2-cis-1.24/config.yaml b/cfg/rke2-cis-1.24/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/rke2-cis-1.24/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/rke2-cis-1.24/controlplane.yaml b/cfg/rke2-cis-1.24/controlplane.yaml new file mode 100644 index 000000000..9dc031778 --- /dev/null +++ b/cfg/rke2-cis-1.24/controlplane.yaml @@ -0,0 +1,50 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created (Automated)" + audit: "/bin/ps -ef | grep kube-apiserver | grep -v grep | grep -o audit-policy-file" + type: "skip" + tests: + test_items: + - flag: "--audit-policy-file" + compare: + op: eq + value: "--audit-policy-file" + set: true + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns (Manual)" + type: "manual" + remediation: | + Review the audit policy provided for the cluster and ensure that it covers + at least the following areas: + - Access to Secrets managed by the cluster.
Care should be taken to only + log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in + order to avoid risk of logging sensitive data. + - Modification of Pod and Deployment objects. + - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`. + For most requests, minimally logging at the Metadata level is recommended + (the most basic level of logging). + scored: false diff --git a/cfg/rke2-cis-1.24/etcd.yaml b/cfg/rke2-cis-1.24/etcd.yaml new file mode 100644 index 000000000..1e62ff433 --- /dev/null +++ b/cfg/rke2-cis-1.24/etcd.yaml @@ -0,0 +1,150 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--cert-file" + env: "ETCD_CERT_FILE" + - flag: "--key-file" + env: "ETCD_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: true + type: "skip" + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + type: "skip" + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + set: true + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + set: true + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + type: "skip" + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter.
+ --peer-client-cert-auth=true + scored: true + type: "skip" + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + set: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config" + tests: + bin_op: or + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt" + set: true + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/rke2-cis-1.24/master.yaml b/cfg/rke2-cis-1.24/master.yaml new file mode 100644 index 000000000..a11048d14 --- /dev/null +++ b/cfg/rke2-cis-1.24/master.yaml @@ -0,0 +1,1051 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml" + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "644" + set: true + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 644 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 644 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
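+ # Note on `op: bitmask` used above (illustrative, not CIS text): the check passes when the + # observed mode sets no permission bits beyond the stated value, e.g. with value "600" a file + # at 600 or 400 passes, while 640 fails.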
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + if test -e /etc/cni/net.d; then + ps -fC "${kubeletbin:-kubelet}" | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + else + echo "File not found" + fi + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "root:root" + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + set: true + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + type: "skip" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c %a /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: | + if ls /var/lib/rancher/rke2/server/tls/*.key > /dev/null 2>&1; then + stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key + else + echo "File not found" + fi + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
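+ # Illustrative verification after remediation (not CIS text): the audit above expects lines + # like "permissions=600", which can be reproduced manually with + # stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key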
+ For example, + chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the `DenyServiceExternalIPs` + from enabled admission plugins. + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--kubelet-https" + compare: + op: eq + value: true + - flag: "--kubelet-https" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and remove the --kubelet-https parameter. + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + set: true + - flag: "--kubelet-client-key" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+ --kubelet-certificate-authority= + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + set: true + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... 
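+ # Illustrative (not CIS text): the plugins currently enabled can be listed from the same + # process table the audit above inspects, e.g. + # ps -ef | grep kube-apiserver | grep -o 'enable-admission-plugins=[^ ]*'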
+ scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + set: true + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + SecurityContextDeny, unless PodSecurityPolicy is already in place. + --enable-admission-plugins=...,SecurityContextDeny,... + scored: false + + - id: 1.2.14 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. + scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.16 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + set: true + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.17 + text: "Ensure that the --secure-port argument is not set to 0 (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--secure-port" + compare: + op: gt + value: 0 + set: true + - flag: "--secure-port" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.18 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. 
+ --profiling=false + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-path" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.21 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: "skip" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.23 + text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--request-timeout" + set: false + - flag: "--request-timeout" + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: true + + - id: 1.2.24 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. 
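+ # Note (illustrative, not CIS text): with `bin_op: or` above, the check passes when + # --service-account-lookup is absent, so the secure default of true applies, or when it is + # explicitly true; only an explicit --service-account-lookup=false fails it.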
+ scored: true + + - id: 1.2.25 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + set: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. + --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.28 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.29 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + set: true + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.30 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+ For example, --encryption-provider-config= + scored: false + + - id: 1.2.31 + text: "Ensure that encryption providers are appropriately configured (Manual)" + type: "skip" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.32 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + type: skip + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file.
+ --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: false + type: skip + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml new file mode 100644 index 000000000..dcbe85793 --- /dev/null +++ b/cfg/rke2-cis-1.24/node.yaml @@ -0,0 +1,466 @@ +--- +controls: +version: "rke2-cis-1.24" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node.
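+ # Illustrative verification (not CIS text): after the chown, re-run the audit above, + # /bin/sh -c 'if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi' + # and confirm it prints root:root.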
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "600" + compare: + op: eq + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + set: true + compare: + op: eq + value: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: "check_cafile_permissions.sh" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file: chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "check_cafile_ownership.sh" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
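+ # Illustrative sketch (field names taken from the `path` entries of 4.2.1-4.2.3 above; the + # CA path is a placeholder): a kubelet config file satisfying these checks would contain + # authentication: {anonymous: {enabled: false}, x509: {clientCAFile: <path/to/ca.crt>}} + # authorization: {mode: Webhook}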
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `readOnlyPort` to 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --read-only-port=0
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a
+          value other than 0.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --streaming-connection-idle-timeout=5m
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              compare:
+                op: eq
+                value: true
+              set: true
+            - flag: --protect-kernel-defaults
+              path: '{.protectKernelDefaults}'
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          --protect-kernel-defaults=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove the --make-iptables-util-chains argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the --hostname-override argument is not set (Manual)"
+        # This is one of those properties that can only be set as a command line argument.
+        # To check if the property is set as expected, we need to parse the kubelet command
+        # instead of reading the Kubelet Configuration file.
+        audit: "/bin/ps -fC $kubeletbin "
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and remove the --hostname-override argument from the
+          KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+        type: skip
+
+      - id: 4.2.9
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: eq
+                value: 0
+        remediation: |
+          If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.10
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '{.tlsCertFile}'
+            - flag: --tls-private-key-file
+              path: '{.tlsPrivateKeyFile}'
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
+          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
+          to the location of the corresponding private key file.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
+          --tls-cert-file=
+          --tls-private-key-file=
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+          bin_op: or
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line `rotateCertificates: true` or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example,
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          Edit the kubelet service file $kubeletsvc
+          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
+        type: skip
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "/bin/ps -fC $kubeletbin"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          $kubeletsvc on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/rke2-cis-1.24/policies.yaml b/cfg/rke2-cis-1.24/policies.yaml
new file mode 100644
index 000000000..2613f850d
--- /dev/null
+++ b/cfg/rke2-cis-1.24/policies.yaml
@@ -0,0 +1,269 @@
+---
+controls:
+version: "rke2-cis-1.24"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
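+          A sketch for listing them (a hypothetical one-liner, assuming kubectl and jq
+          are available on the admin workstation):
+          kubectl get clusterrolebindings -o json | jq -r '.items[] | select(.roleRef.name=="cluster-admin") | .metadata.name'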
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
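+          As a minimal sketch (assuming Pod Security Admission is the chosen policy
+          mechanism; the namespace name is illustrative), enforcing the baseline
+          profile on a namespace rejects hostNetwork pods:
+          kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=baseline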
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Manual)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0, is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
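+          A minimal sketch of the Pod spec change (all names and paths are illustrative):
+            containers:
+              - name: app
+                volumeMounts:
+                  - name: app-secret
+                    mountPath: /etc/app-secret
+                    readOnly: true
+            volumes:
+              - name: app-secret
+                secret:
+                  secretName: app-secret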
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is as below:
+          securityContext:
+            seccompProfile:
+              type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/rke2-cis-1.7/config.yaml b/cfg/rke2-cis-1.7/config.yaml
new file mode 100644
index 000000000..b7839455a
--- /dev/null
+++ b/cfg/rke2-cis-1.7/config.yaml
@@ -0,0 +1,2 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
diff --git a/cfg/rke2-cis-1.7/controlplane.yaml b/cfg/rke2-cis-1.7/controlplane.yaml
new file mode 100644
index 000000000..03d38658c
--- /dev/null
+++ b/cfg/rke2-cis-1.7/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: "rke2-cis-1.7"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+      - id: 3.1.2
+        text: "Service account token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of service account tokens.
+        scored: false
+      - id: 3.1.3
+        text: "Bootstrap token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of bootstrap tokens.
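+          As a sketch, OIDC authentication can be enabled via kube-apiserver flags such
+          as the following (the issuer URL and client ID are illustrative):
+          --oidc-issuer-url=https://issuer.example.com
+          --oidc-client-id=kubernetes
+          --oidc-username-claim=sub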
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster.
+          Permissive.
+        scored: true
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Review the audit policy provided for the cluster and ensure that it covers
+          at least the following areas,
+          - Access to Secrets managed by the cluster. Care should be taken to only
+            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+            order to avoid risk of logging sensitive data.
+          - Modification of Pod and Deployment objects.
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/cfg/rke2-cis-1.7/etcd.yaml b/cfg/rke2-cis-1.7/etcd.yaml
new file mode 100644
index 000000000..885e5d442
--- /dev/null
+++ b/cfg/rke2-cis-1.7/etcd.yaml
@@ -0,0 +1,154 @@
+---
+controls:
+version: "rke2-cis-1.7"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--cert-file"
+              env: "ETCD_CERT_FILE"
+            - flag: "--key-file"
+              env: "ETCD_KEY_FILE"
+        remediation: |
+          Follow the etcd service documentation and configure TLS encryption.
+          Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml
+          on the master node and set the below parameters.
+          --cert-file=
+          --key-file=
+          Not Applicable.
+        scored: true
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-cert-auth"
+              env: "ETCD_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --client-cert-auth="true"
+          Not Applicable.
+        scored: true
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              set: false
+            - flag: "--auto-tls"
+              env: "ETCD_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+          --auto-tls=false
+        scored: true
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are
+          set as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--peer-cert-file"
+              env: "ETCD_PEER_CERT_FILE"
+              set: true
+            - flag: "--peer-key-file"
+              env: "ETCD_PEER_KEY_FILE"
+              set: true
+        remediation: |
+          Follow the etcd service documentation and configure peer TLS encryption as appropriate
+          for your etcd cluster.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameters.
+          --peer-cert-file=
+          --peer-key-file=
+          Not Applicable.
+        scored: true
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          test_items:
+            - flag: "--peer-client-cert-auth"
+              env: "ETCD_PEER_CLIENT_CERT_AUTH"
+              compare:
+                op: eq
+                value: true
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and set the below parameter.
+          --peer-client-cert-auth=true
+          Not Applicable.
+        scored: true
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              set: false
+            - flag: "--peer-auto-tls"
+              env: "ETCD_PEER_AUTO_TLS"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+          --peer-auto-tls=false
+        scored: true
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+        audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep"
+        audit_config: "cat /var/lib/rancher/rke2/server/db/etcd/config"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--trusted-ca-file"
+              env: "ETCD_TRUSTED_CA_FILE"
+            - path: "{.peer-transport-security.trusted-ca-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/rke2/server/tls/etcd/peer-ca.crt"
+              set: true
+        remediation: |
+          [Manual test]
+          Follow the etcd documentation and create a dedicated certificate authority setup for the
+          etcd service.
+          Then, edit the etcd pod specification file $etcdconf on the
+          master node and set the below parameter.
+          --trusted-ca-file=
+        scored: true
diff --git a/cfg/rke2-cis-1.7/master.yaml b/cfg/rke2-cis-1.7/master.yaml
new file mode 100644
index 000000000..f7734b8cf
--- /dev/null
+++ b/cfg/rke2-cis-1.7/master.yaml
@@ -0,0 +1,1027 @@
+---
+controls:
+version: "rke2-cis-1.7"
+id: 1
+text: "Control Plane Security Configuration"
+type: "master"
+groups:
+  - id: 1.1
+    text: "Control Plane Node Configuration Files"
+    checks:
+      - id: 1.1.1
+        text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/agent/pod-manifests/kube-apiserver.yaml"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the
+          control plane node.
+          For example, chmod 600 $apiserverconf
+        scored: true
+
+      - id: 1.1.2
+        text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $apiserverconf
+        scored: true
+
+      - id: 1.1.3
+        text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $controllermanagerconf
+        scored: true
+
+      - id: 1.1.4
+        text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $controllermanagerconf
+        scored: true
+
+      - id: 1.1.5
+        text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 $schedulerconf
+        scored: true
+
+      - id: 1.1.6
+        text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root $schedulerconf
+        scored: true
+
+      - id: 1.1.7
+        text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $etcdconf
+        scored: true
+
+      - id: 1.1.8
+        text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)"
+        audit: "/bin/sh -c 'if test -e $etcdconf; then stat -c %U:%G $etcdconf; fi'"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $etcdconf
+        scored: true
+
+      - id: 1.1.9
+        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
+        audit: |
+          ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600
+        scored: false
+
+      - id: 1.1.10
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
+        audit: |
+          ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root
+        scored: false
+
+      - id: 1.1.11
+        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/db/etcd"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "700"
+              set: true
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above). For example,
+          chmod 700 /var/lib/etcd
+        scored: true
+
+      - id: 1.1.12
+        text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)"
+        type: "skip"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/db/etcd"
+        tests:
+          test_items:
+            - flag: "etcd:etcd"
+        remediation: |
+          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
+          from the command 'ps -ef | grep etcd'.
+          Run the below command (based on the etcd data directory found above).
+          For example, chown etcd:etcd /var/lib/etcd
+        scored: true
+
+      - id: 1.1.13
+        text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chmod 600 /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.1.14
+        text: "Ensure that the admin.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/admin.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example, chown root:root /etc/kubernetes/admin.conf
+        scored: true
+
+      - id: 1.1.15
+        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.16
+        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/scheduler.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root $schedulerkubeconfig
+        scored: true
+
+      - id: 1.1.17
+        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
+        tests:
+          test_items:
+            - flag: "600"
+              compare:
+                op: eq
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod 600 $controllermanagerkubeconfig
+        scored: true
+
+      - id: 1.1.18
+        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/cred/controller.kubeconfig"
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown root:root /var/lib/rancher/rke2/server/cred/controller.kubeconfig
+        scored: true
+
+      - id: 1.1.19
+        text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G /var/lib/rancher/rke2/server/tls"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "root:root"
+              compare:
+                op: eq
+                value: "root:root"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chown -R root:root /etc/kubernetes/pki/
+        scored: true
+
+      - id: 1.1.20
+        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.crt"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/lib/rancher/rke2/server/tls/*.crt
+        scored: false
+
+      - id: 1.1.21
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
+        audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key"
+        use_multiple_values: true
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+              set: true
+        remediation: |
+          Run the below command (based on the file location on your system) on the control plane node.
+          For example,
+          chmod -R 600 /var/lib/rancher/rke2/server/tls/*.key
+        scored: false
+
+  - id: 1.2
+    text: "API Server"
+    checks:
+      - id: 1.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --anonymous-auth=false
+        scored: false
+
+      - id: 1.2.2
+        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--token-auth-file"
+              set: false
+        remediation: |
+          Follow the documentation and configure alternate mechanisms for authentication. Then,
+          edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the --token-auth-file= parameter.
+        scored: true
+
+      - id: 1.2.3
+        text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: "DenyServiceExternalIPs"
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and remove the `DenyServiceExternalIPs`
+          from enabled admission plugins.
+        scored: true
+
+      - id: 1.2.4
+        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--kubelet-client-certificate"
+              set: true
+            - flag: "--kubelet-client-key"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the
+          apiserver and kubelets. Then, edit API server pod specification file
+          $apiserverconf on the control plane node and set the
+          kubelet client certificate and key parameters as below.
+          --kubelet-client-certificate=
+          --kubelet-client-key=
+        scored: true
+
+      - id: 1.2.5
+        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--kubelet-certificate-authority"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between
+          the apiserver and kubelets. Then, edit the API server pod specification file
+          $apiserverconf on the control plane node and set the
+          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
+          --kubelet-certificate-authority=
+        scored: true
+
+      - id: 1.2.6
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: nothave
+                value: "AlwaysAllow"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
+          One such example could be as below.
+          --authorization-mode=RBAC
+        scored: true
+
+      - id: 1.2.7
+        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "Node"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
+          --authorization-mode=Node,RBAC
+        scored: true
+
+      - id: 1.2.8
+        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--authorization-mode"
+              compare:
+                op: has
+                value: "RBAC"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
+          for example `--authorization-mode=Node,RBAC`.
+        scored: true
+
+      - id: 1.2.9
+        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "EventRateLimit"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set the desired limits in a configuration file.
+          Then, edit the API server pod specification file $apiserverconf
+          and set the below parameters.
+          --enable-admission-plugins=...,EventRateLimit,...
+          --admission-control-config-file=
+        scored: false
+
+      - id: 1.2.10
+        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: nothave
+                value: AlwaysAdmit
+              set: true
+            - flag: "--enable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
+          value that does not include AlwaysAdmit.
+        scored: true
+
+      - id: 1.2.11
+        text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "AlwaysPullImages"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          AlwaysPullImages.
+          --enable-admission-plugins=...,AlwaysPullImages,...
+        scored: false
+
+      - id: 1.2.12
+        text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+              set: true
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to include
+          SecurityContextDeny, unless PodSecurityPolicy is already in place.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+        scored: false
+
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+            - flag: "--disable-admission-plugins"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --enable-admission-plugins parameter to a
+          value that includes NodeRestriction.
+          --enable-admission-plugins=...,NodeRestriction,...
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the --secure-port argument is not set to 0 - Note: This recommendation is obsolete and will be deleted per the consensus process (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--secure-port"
+              compare:
+                op: gt
+                value: 0
+              set: true
+            - flag: "--secure-port"
+              set: false
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and either remove the --secure-port parameter or
+          set it to a different (non-zero) desired port.
+        scored: true
+
+      - id: 1.2.17
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.2.18
+        text: "Ensure that the --audit-log-path argument is set (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-path"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-path parameter to a suitable path and
+          file where you would like audit logs to be written, for example,
+          --audit-log-path=/var/log/apiserver/audit.log
+          Permissive.
+        scored: true
+
+      - id: 1.2.19
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxage"
+              compare:
+                op: gte
+                value: 30
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxage parameter to 30
+          or as an appropriate number of days, for example,
+          --audit-log-maxage=30
+          Permissive.
+        scored: true
+
+      - id: 1.2.20
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup"
+              compare:
+                op: gte
+                value: 10
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
+          value. For example,
+          --audit-log-maxbackup=10
+          Permissive.
+        scored: true
+
+      - id: 1.2.21
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
+        type: "skip"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize"
+              compare:
+                op: gte
+                value: 100
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+          For example, to set it as 100 MB, --audit-log-maxsize=100
+        scored: true
+
+      - id: 1.2.22
+        text: "Ensure that the --request-timeout argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--request-timeout"
+              set: false
+            - flag: "--request-timeout"
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          and set the below parameter as appropriate and if needed.
+          For example, --request-timeout=300s
+        scored: true
+
+      - id: 1.2.23
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the below parameter.
+          --service-account-lookup=true
+          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+              set: true
+        remediation: |
+          Edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --service-account-key-file parameter
+          to the public key file for service accounts. For example,
+          --service-account-key-file=
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the client certificate authority file.
+          --client-ca-file=
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+              set: true
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: true
+
+      - id: 1.2.29
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file $apiserverconf
+          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
+          For example, --encryption-provider-config=
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        type: "skip"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
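+          A minimal sketch of such a file (the aescbc key shown is a placeholder, not
+          real key material):
+            apiVersion: apiserver.config.k8s.io/v1
+            kind: EncryptionConfiguration
+            resources:
+              - resources: ["secrets"]
+                providers:
+                  - aescbc:
+                      keys:
+                        - name: key1
+                          secret: <base64-encoded-32-byte-key>
+                  - identity: {}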
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+          Permissive.
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
+        type: skip
+        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+          on the control plane node and set the below parameter.
+          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
+          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
+          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
+          Not Applicable.
+        scored: false
+
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example, --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+              set: true
+        remediation: |
+          Edit the Controller Manager pod specification file $controllermanagerconf
+          on the control plane node to set the below parameter.
+ --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + set: true + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + type: skip + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + set: true + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + set: true + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter + scored: true diff --git a/cfg/rke2-cis-1.7/node.yaml b/cfg/rke2-cis-1.7/node.yaml new file mode 100644 index 000000000..a390e39a5 --- /dev/null +++ b/cfg/rke2-cis-1.7/node.yaml @@ -0,0 +1,458 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + Not Applicable. + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' ' + type: "skip" + tests: + test_items: + - flag: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletsvc + Not Applicable. + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + set: true + compare: + op: eq + value: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: "check_cafile_permissions.sh" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file: chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: "check_cafile_ownership.sh" + tests: + test_items: + - flag: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + set: true + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook.
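+ As a sketch, the corresponding section of a kubelet config file (field name from the
+ KubeletConfiguration API) would be:
+ authorization:
+   mode: Webhook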
If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. 
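+ In a kubelet config file the equivalent setting, shown here as a sketch, is the single line:
+ makeIPTablesUtilChains: true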
+ Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin " + type: skip + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + Not Applicable. + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. 
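+ As a sketch, in a kubelet config file this could be (4096 is an illustrative value only):
+ podPidsLimit: 4096
+ or, on the command line, --pod-max-pids=4096.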
+ scored: false diff --git a/cfg/rke2-cis-1.7/policies.yaml b/cfg/rke2-cis-1.7/policies.yaml new file mode 100644 index 000000000..75a3efea7 --- /dev/null +++ b/cfg/rke2-cis-1.7/policies.yaml @@ -0,0 +1,304 @@ +--- +controls: +version: "rke2-cis-1.7" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Manual)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + kubectl delete clusterrolebinding [name] + scored: false + + - id: 5.1.2 + text: "Minimize access to secrets (Manual)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: false + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Manual)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 5.1.4 + text: "Minimize access to create pods (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: false + + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used. (Manual)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + automountServiceAccountToken: false + scored: false + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. 
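+ As a sketch, such access can be reviewed per subject with kubectl auth can-i
+ (the user name is a placeholder):
+ kubectl auth can-i update certificatesigningrequests/approval --as=jane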
+ scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects. + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers. + scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Automated)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster.
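+ For example, one way to list the capabilities added per container (a jsonpath sketch):
+ kubectl get pods -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"/"}{.metadata.name}{" "}{.spec.containers[*].securityContext.capabilities.add}{"\n"}{end}'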
Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a PSP which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers.
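+ A container-level example of the kind of SecurityContext intended here (values are
+ illustrative, not prescribed by this benchmark):
+ securityContext:
+   runAsNonRoot: true
+   allowPrivilegeEscalation: false
+   capabilities:
+     drop: ["ALL"]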
+ scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. + scored: false diff --git a/cfg/tkgi-1.2.53/config.yaml b/cfg/tkgi-1.2.53/config.yaml new file mode 100644 index 000000000..b7839455a --- /dev/null +++ b/cfg/tkgi-1.2.53/config.yaml @@ -0,0 +1,2 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml diff --git a/cfg/tkgi-1.2.53/controlplane.yaml b/cfg/tkgi-1.2.53/controlplane.yaml new file mode 100644 index 000000000..4f3ab677e --- /dev/null +++ b/cfg/tkgi-1.2.53/controlplane.yaml @@ -0,0 +1,67 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 3 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 3.1 + text: "Authentication and Authorization" + checks: + - id: 3.1.1 + text: "Client certificate authentication should not be used for users" + audit: ps -ef | grep kube-apiserver | grep -- "--oidc-issuer-url=" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + Exception + This setting is site-specific. It can be set in the "Configure created clusters to use UAA as the OIDC provider." + section of the "UAA" + scored: false + + - id: 3.2 + text: "Logging" + checks: + - id: 3.2.1 + text: "Ensure that a minimal audit policy is created" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-policy-file=" + tests: + test_items: + - flag: "--audit-policy-file" + remediation: | + Create an audit policy file for your cluster. + scored: true + + - id: 3.2.2 + text: "Ensure that the audit policy covers key security concerns" + audit: | + diff /var/vcap/jobs/kube-apiserver/config/audit_policy.yml \ <(echo "--- apiVersion: audit.k8s.io/v1beta1 kind: + Policy rules: - level: None resources: - group: '' resources: - endpoints - services - services/status users: - + system:kube-proxy verbs: - watch - level: None resources: - group: '' resources: - nodes - nodes/status users: - + kubelet verbs: - get - level: None resources: - group: '' resources: - nodes - nodes/status userGroups: - + system:nodes verbs: - get - level: None namespaces: - kube-system resources: - group: '' resources: - + endpoints users: - system:kube-controller-manager - system:kube-scheduler - system:serviceaccount:kube- + system:endpoint-controller verbs: - get - update - level: None resources: - group: '' resources: - namespaces - + namespaces/status - namespaces/finalize users: - system:apiserver verbs: - get - level: None resources: - + group: metrics.k8s.io users: - system:kube-controller-manager verbs: - get - list - level: None + nonResourceURLs: - \"/healthz*\" - \"/version\" - \"/swagger*\" - level: None resources: - group: '' resources: - + events - level: Request omitStages: - RequestReceived resources: - group: '' resources: - nodes/status - + pods/status userGroups: - system:nodes verbs: - update - patch - level: Request omitStages: - + RequestReceived users: - system:serviceaccount:kube-system:namespace-controller verbs: - deletecollection - + level: Metadata omitStages: - RequestReceived resources: - group: '' resources: - secrets - configmaps - group: + authentication.k8s.io resources: - tokenreviews - level: Request omitStages: - RequestReceived resources: - + group: '' - group: admissionregistration.k8s.io - 
group: apiextensions.k8s.io - group: apiregistration.k8s.io - + group: apps - group: authentication.k8s.io - group: authorization.k8s.io - group: autoscaling - group: batch - + group: certificates.k8s.io - group: extensions - group: metrics.k8s.io - group: networking.k8s.io - group: policy - + group: rbac.authorization.k8s.io - group: settings.k8s.io - group: storage.k8s.io verbs: - get - list - watch - level: + RequestResponse omitStages: - RequestReceived resources: - group: '' - group: admissionregistration.k8s.io - + group: apiextensions.k8s.io - group: apiregistration.k8s.io - group: apps - group: authentication.k8s.io - group: + authorization.k8s.io - group: autoscaling - group: batch - group: certificates.k8s.io - group: extensions - group: + metrics.k8s.io - group: networking.k8s.io - group: policy - group: rbac.authorization.k8s.io - group: + settings.k8s.io - group: storage.k8s.io - level: Metadata omitStages: - RequestReceived ") + type: "manual" + remediation: | + Consider modification of the audit policy in use on the cluster to include these items, at a + minimum. + scored: false diff --git a/cfg/tkgi-1.2.53/etcd.yaml b/cfg/tkgi-1.2.53/etcd.yaml new file mode 100644 index 000000000..8f99b7f0f --- /dev/null +++ b/cfg/tkgi-1.2.53/etcd.yaml @@ -0,0 +1,121 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 2 +text: "Etcd Node Configuration" +type: "etcd" +groups: + - id: 2 + text: "Etcd Node Configuration Files" + checks: + - id: 2.1 + text: "Ensure that the --cert-file and --key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--cert-file=/var/vcap/jobs/etcd/config/etcd.crt" | grep -- "--key-file=/var/vcap/jobs/etcd/config/etcd.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--cert-file" + - flag: "--key-file" + remediation: | + Follow the etcd service documentation and configure TLS encryption. + Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml + on the master node and set the below parameters. + --cert-file= + --key-file= + scored: false + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--client\-cert\-auth" + tests: + test_items: + - flag: "--client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--auto-tls" + tests: + test_items: + - flag: "--auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate" + audit: ps -ef | grep etcd | grep -- "--peer-cert-file=/var/vcap/jobs/etcd/config/peer.crt" | grep -- "--peer-key-file=/var/vcap/jobs/etcd/config/peer.key" + type: manual + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + - flag: "--peer-key-file" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameters. 
+ --peer-cert-file= + --peer-key-file= + scored: false + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true" + audit: ps -ef | grep etcd | grep -- "--peer\-client\-cert\-auth" + tests: + test_items: + - flag: "--peer-client-cert-auth" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file etcd config on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true" + audit: ps -ef | grep etcd | grep -v -- "--peer-auto-tls" + tests: + test_items: + - flag: "--peer-auto-tls" + compare: + op: eq + value: true + set: false + remediation: | + Edit the etcd pod specification file etcd config on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd" + audit: diff /var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem /var/vcap/jobs/etcd/config/etcd-ca.crt | grep -c "^>" | grep -v "^0$" + type: manual + tests: + test_items: + - flag: "--trusted-ca-file" + remediation: | + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file etcd config on the + master node and set the below parameter. + --trusted-ca-file= + scored: false diff --git a/cfg/tkgi-1.2.53/master.yaml b/cfg/tkgi-1.2.53/master.yaml new file mode 100644 index 000000000..8d1945700 --- /dev/null +++ b/cfg/tkgi-1.2.53/master.yaml @@ -0,0 +1,1098 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 1 +text: "Master Node Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Master Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-apiserver/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /var/vcap/jobs/kube-apiserver/config/bpm.yml + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node.
+ For example, chmod 644 /var/vcap/jobs/kube-controller-manager/config/bpm.yml + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-controller-manager/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chown root:root /var/vcap/jobs/kube-controller-manager/config/bpm.yml + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-scheduler/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the + master node. + For example, chmod 644 /var/vcap/jobs/kube-scheduler/config/bpm.yml + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-scheduler/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /var/vcap/jobs/kube-scheduler/config/bpm.yml + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/etcd/config/bpm.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /var/vcap/jobs/etcd/config/bpm.yml + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/etcd/config/bpm.yml + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /var/vcap/jobs/etcd/config/bpm.yml + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 644 or more restrictive" + audit: find ((CNI_DIR))/config/ -type f -not -perm 640 | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root" + audit: find ((CNI_DIR))/config/ -type f -not -user root -or -not -group root | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive" + audit: stat -c permissions=%a /var/vcap/store/etcd/ + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + Run the below command (based on the etcd data directory found above).
For example, + chmod 700 /var/vcap/store/etcd/ + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd" + audit: stat -c %U:%G /var/vcap/store/etcd/ + type: manual + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/vcap/store/etcd/ + Exception: All bosh processes run as vcap user + The etcd data directory ownership is vcap:vcap + scored: false + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /etc/kubernetes/admin.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/admin.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/admin.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.15 + text: "Ensure that the scheduler configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.16 + text: "Ensure that the scheduler configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/scheduler.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/scheduler.conf + Exception + kubeadm is not used to provision/bootstrap the cluster.
kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.17 + text: "Ensure that the controller manager configuration file permissions are set to 644" + audit: stat -c permissions=%a /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod 644 /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.18 + text: "Ensure that the controller manager configuration file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/controller-manager.conf + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown root:root /etc/kubernetes/controller-manager.conf + Exception + kubeadm is not used to provision/bootstrap the cluster. kubeadm and associated config files do not exist on + master + Reference: https://kubernetes.io/docs/reference/setup-tools/kubeadm/implementation-details/#generate-kubeconfig-files-for-control-plane-components + scored: false + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config /var/vcap/jobs/kube-controller-manager/config /var/vcap/jobs/kube-scheduler/config ((CNI_DIR))/config /var/vcap/jobs/etcd/config | sort -u | xargs ls -ld | awk '{ print $3 " " $4}' | + grep -c -v "root root" | grep "^0$" + type: manual + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chown -R root:root /etc/kubernetes/pki/ + Exception + Files are group-owned by vcap + scored: false + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 644 or more restrictive" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config \( -name '*.crt' -or -name '*.pem' \) -and -not -perm 640 | grep -v "packages/golang" | grep -v "packages/ncp_rootfs" | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 644 /etc/kubernetes/pki/*.crt + Exception + Ignoring packages/golang as the package includes test certs used by golang. Ignoring packages/ncp_rootfs on
Ignoring packages/ncp_rootfs on + TKG1 with NSX-T container plugin uses the package is used as the overlay filesystem `mount | grep + "packages/ncp_rootfs"` + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600" + audit: | + find -L /var/vcap/jobs/kube-apiserver/config -name '*.key' -and -not -perm 600 | awk 'END{print NR}' | grep "^0$" + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: eq + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.key + Exception + Permission on etcd .key files is set to 640, to allow read access to vcap group + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -- "--anonymous-auth=false" + type: manual + tests: + test_items: + - flag: "--anonymous-auth=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --anonymous-auth=false + Exception + The flag is set to true to enable API discoveribility. + "Starting in 1.6, the ABAC and RBAC authorizers require explicit authorization of the system:anonymous user or the + system:unauthenticated group, so legacy policy rules that grant access to the * user or * group do not include + anonymous users." + -authorization-mode is set to RBAC + scored: false + + - id: 1.2.2 + text: "Ensure that the --basic-auth-file argument is not set" + audit: ps -ef | grep kube-apiserver | grep -v -- "--basic-auth-file" + tests: + test_items: + - flag: "--basic-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file kube-apiserver + on the master node and remove the --basic-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --token-auth-file parameter is not set" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-apiserve[r]" | grep -v tini | grep -v -- "--token-auth-file=" + type: manual + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file /var/vcap/packages/kubernetes/bin/kube-apiserve[r] + on the master node and remove the --token-auth-file= parameter. + Exception + Since k8s processes' lifecyle are managed by BOSH, token based authentication is required when processes + restart. The file has 0640 permission and root:vcap ownership + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-https argument is set to true" + audit: ps -ef | grep kube-apiserver | grep -v -- "--kubelet-https=true" + tests: + test_items: + - flag: "--kubelet-https=true" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --kubelet-https parameter. 
+ scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -- "--kubelet-client-certificate=/var/vcap/jobs/kube-apiserver/config/kubelet-client-cert.pem" | grep -- "--kubelet-client-key=/var/vcap/jobs/kube-apiserver/config/kubelet-client-key.pem" + type: manual + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + kube-apiserver on the master node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: false + + - id: 1.2.6 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate" + audit: ps -ef | grep kube-apiserver | grep -- "--kubelet-certificate-authority=" + type: manual + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + kube-apiserver on the master node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + Exception + JIRA ticket #PKS-696 created to investigate a fix. PR opened to address the issue https://github.com/cloudfoundry-incubator/kubo-release/pull/179 + scored: false + + - id: 1.2.7 + text: "Ensure API server authorization modes do not include AlwaysAllow" + audit: | + ps -ef | grep kube-apiserver | grep -- "--authorization-mode" && ps -ef | grep kube-apiserver | grep -v -- "--authorization-mode=\(\w\+\|,\)*AlwaysAllow\(\w\+\|,\)*" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below. + --authorization-mode=RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes Node" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*Node\(\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + Exception + This flag can be added using Kubernetes Profiles.
Please follow instructions here https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false + + - id: 1.2.9 + text: "Ensure that the --authorization-mode argument includes RBAC" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--authorization-mode=\(\w\+\|,\)*RBAC\(\w\+\|,\)* --" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --authorization-mode parameter to a value that includes RBAC, + for example: + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.10 + text: "Ensure that the admission control plugin EventRateLimit is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*EventRateLimit\(\w\+\|,\)*" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file kube-apiserver + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + Exception + "Note: This is an Alpha feature in the Kubernetes v1.13" + Control provides rate limiting and is site-specific + scored: false + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysAdmit is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysAdmit\(\w\+\|,\)*" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.12 + text: "Ensure that the admission control plugin AlwaysPullImages is set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*AlwaysPullImages\(\w\+\|,\)* --" + type: manual + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + Exception + "Credentials would be required to pull the private images every time. Also, in trusted + environments, this might increase load on the network and registry, and decrease speed. + This setting could impact offline or isolated clusters, which have images pre-loaded and do + not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + TKGI is packaged with pre-loaded images.
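+ (Where the plugin is not enabled, a broadly similar per-workload effect can be sketched by
+ setting imagePullPolicy: Always on individual containers, though that is not what this
+ control checks.)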
+      - id: 1.2.13
+        text: "Ensure that the admission control plugin SecurityContextDeny is set"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*SecurityContextDeny\(\w\+\|,\)* --"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "SecurityContextDeny"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the --enable-admission-plugins parameter to include
+          SecurityContextDeny, unless PodSecurityPolicy is already in place.
+          --enable-admission-plugins=...,SecurityContextDeny,...
+          Exception
+          This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan"
+        scored: false
+
+      - id: 1.2.14
+        text: "Ensure that the admission control plugin ServiceAccount is set"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*ServiceAccount\(\w\+\|,\)* --"
+        tests:
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "ServiceAccount"
+        remediation: |
+          Follow the documentation and create ServiceAccount objects as per your environment.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and ensure that the --disable-admission-plugins parameter is set to a
+          value that does not include ServiceAccount.
+        scored: true
+
+      - id: 1.2.15
+        text: "Ensure that the admission control plugin NamespaceLifecycle is set"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--disable-admission-plugins=\(\w\+\|,\)*NamespaceLifecycle\(\w\+\|,\)* --"
+        tests:
+          test_items:
+            - flag: "--disable-admission-plugins"
+              compare:
+                op: nothave
+                value: "NamespaceLifecycle"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the --disable-admission-plugins parameter to
+          ensure it does not include NamespaceLifecycle.
+        scored: true
+
+      - id: 1.2.16
+        text: "Ensure that the admission control plugin PodSecurityPolicy is set"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*PodSecurityPolicy\(\w\+\|,\)* --"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "PodSecurityPolicy"
+        remediation: |
+          Follow the documentation and create Pod Security Policy objects as per your environment.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the --enable-admission-plugins parameter to a
+          value that includes PodSecurityPolicy:
+          --enable-admission-plugins=...,PodSecurityPolicy,...
+          Then restart the API Server.
+          Exception
+          This setting is site-specific. It can be set in the "Admission Plugins" section of the appropriate "Plan"
+        scored: false
+
+      - id: 1.2.17
+        text: "Ensure that the admission control plugin NodeRestriction is set"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--enable-admission-plugins=\(\w\+\|,\)*NodeRestriction\(\w\+\|,\)* --"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--enable-admission-plugins"
+              compare:
+                op: has
+                value: "NodeRestriction"
+        remediation: |
+          Follow the Kubernetes documentation and configure the NodeRestriction plug-in on kubelets.
+ Then, edit the API server pod specification file kube-apiserver + on the master node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + Exception + PR opened to address the issue https://github.com/cloudfoundry-incubator/kubo-release/pull/179" + scored: true + + - id: 1.2.18 + text: "Ensure that the --insecure-bind-address argument is not set" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--insecure-bind-address" + tests: + test_items: + - flag: "--insecure-bind-address" + set: false + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and remove the --insecure-bind-address parameter. + scored: true + + - id: 1.2.19 + text: "Ensure that the --insecure-port argument is set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--insecure-port=0" + type: manual + tests: + test_items: + - flag: "--insecure-port=0" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --insecure-port=0 + Exception + Related to 1.2.1 + The insecure port is 8080, and is binding only to localhost on the master node, in use by other components on the + master that are bypassing authn/z. + The components connecting to the APIServer are: + kube-controller-manager + kube-proxy + kube-scheduler + Pods are not scheduled on the master node. + scored: false + + - id: 1.2.20 + text: "Ensure that the --secure-port argument is not set to 0" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--secure-port=0" + tests: + test_items: + - flag: "--secure-port" + compare: + op: noteq + value: 0 + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and either remove the --secure-port parameter or + set it to a different (non-zero) desired port. + scored: true + + - id: 1.2.21 + text: "Ensure that the --profiling argument is set to false" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--profiling=false" + tests: + test_items: + - flag: "--profiling=false" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.22 + text: "Ensure that the --audit-log-path argument is set as appropriate" + audit: | + ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-path=\/var\/vcap\/sys\/log\/kube-apiserver\/audit.log" + type: manual + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example: + --audit-log-path=/var/log/apiserver/audit.log + scored: false + + - id: 1.2.23 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate" + audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxage=30" + type: manual + tests: + test_items: + - flag: "--audit-log-maxage=30" + remediation: | + Edit the API server pod specification file kube-apiserver + on the master node and set the --audit-log-maxage parameter to 30 or as an appropriate number of days: + --audit-log-maxage=30 + Exception + This setting can be set to expected value using Kubernetes Profiles. 
+          Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
+        scored: false
+
+      - id: 1.2.24
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate"
+        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxbackup=10"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--audit-log-maxbackup=10"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
+          value.
+          --audit-log-maxbackup=10
+          Exception
+          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
+        scored: false
+
+      - id: 1.2.25
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate"
+        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--audit-log-maxsize=100"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--audit-log-maxsize=100"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the --audit-log-maxsize parameter to an appropriate size in MB.
+          For example, to set it as 100 MB:
+          --audit-log-maxsize=100
+          Exception
+          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
+        scored: false
+
+      - id: 1.2.26
+        text: "Ensure that the --request-timeout argument is set as appropriate"
+        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--request-timeout="
+        type: manual
+        tests:
+          test_items:
+            - flag: "--request-timeout"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          and set the below parameter as appropriate and if needed.
+          For example,
+          --request-timeout=300s
+        scored: false
+
+      - id: 1.2.27
+        text: "Ensure that the --service-account-lookup argument is set to true"
+        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -v -- "--service-account-lookup"
+        tests:
+          test_items:
+            - flag: "--service-account-lookup=true"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the below parameter.
+          --service-account-lookup=true
+          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
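Several of the audit-log checks above pin an exact value (for example `--audit-log-maxage=30`), while other checks in this file use numeric compare ops such as `op: noteq` (1.2.20) or `op: gt`. A small sketch of how such numeric comparisons behave, loosely modelled on the compareOp logic in check/test.go further down; this is a simplification that assumes integer-valued flags, not the library's exact code.

```go
package main

import (
	"fmt"
	"strconv"
)

// numCompare evaluates a numeric compare op the way checks like
// `op: noteq, value: 0` (1.2.20) or a gt/lt threshold would.
// Non-numeric input fails the comparison instead of panicking.
func numCompare(op, flagVal, compareVal string) bool {
	a, errA := strconv.Atoi(flagVal)
	b, errB := strconv.Atoi(compareVal)
	if errA != nil || errB != nil {
		return false // invalid number(s) used for comparison
	}
	switch op {
	case "eq":
		return a == b
	case "noteq":
		return a != b
	case "gt":
		return a > b
	case "lt":
		return a < b
	}
	return false
}

func main() {
	fmt.Println(numCompare("noteq", "8443", "0")) // true: secure port is non-zero
	fmt.Println(numCompare("gt", "30", "15"))     // true: maxage above a threshold
	fmt.Println(numCompare("gt", "abc", "5"))     // false: not a number
}
```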
+      - id: 1.2.28
+        text: "Ensure that the --service-account-key-file argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--service-account-key-file=/var/vcap/jobs/kube-apiserver/config/service-account-public-key.pem"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          Edit the API server pod specification file kube-apiserver
+          on the master node and set the --service-account-key-file parameter
+          to the public key file for service accounts:
+          --service-account-key-file=
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-certfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.crt" | grep -- "--etcd-keyfile=/var/vcap/jobs/kube-apiserver/config/etcd-client.key"
+        type: manual
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+            - flag: "--etcd-keyfile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the etcd certificate and key file parameters.
+          --etcd-certfile=
+          --etcd-keyfile=
+        scored: false
+
+      - id: 1.2.30
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cert-file=/var/vcap/jobs/kube-apiserver/config/kubernetes.pem" | grep -- "--tls-private-key-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-key.pem"
+        type: manual
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+            - flag: "--tls-private-key-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the TLS certificate and private key file parameters.
+          --tls-cert-file=
+          --tls-private-key-file=
+        scored: false
+
+      - id: 1.2.31
+        text: "Ensure that the --client-ca-file argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--client-ca-file=/var/vcap/jobs/kube-apiserver/config/kubernetes-ca.pem"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the client certificate authority file.
+          --client-ca-file=
+        scored: false
+
+      - id: 1.2.32
+        text: "Ensure that the --etcd-cafile argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--etcd-cafile=/var/vcap/jobs/kube-apiserver/config/etcd-ca.crt"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the etcd certificate authority file parameter.
+          --etcd-cafile=
+        scored: false
+
+      - id: 1.2.33
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-apiserver | grep -v tini | grep -- "--encryption-provider-config="
+        type: manual
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          Then, edit the API server pod specification file kube-apiserver
+          on the master node and set the --encryption-provider-config parameter to the path of that file:
+          --encryption-provider-config=
+          Exception
+          Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html
+        scored: false
+
+      - id: 1.2.34
+        text: "Ensure that the encryption provider is set to aescbc"
+        audit: |
+          ENC_CONF=`ps -ef | grep kube-apiserver | grep -v tini | sed $'s/ /\\\\\\n/g' | grep -- '--encryption-provider-config=' | cut -d'=' -f2`
+          grep -- "- \(aescbc\|kms\|secretbox\):" $ENC_CONF
+        type: manual
+        remediation: |
+          Follow the Kubernetes documentation and configure an EncryptionConfig file.
+          In this file, choose aescbc, kms or secretbox as the encryption provider.
+          Exception
+          Encrypting Secrets in an etcd database can be enabled using Kubernetes Profiles. Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles-encrypt-etcd.html
+        scored: false
+
+      - id: 1.2.35
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers"
+        audit: ps -ef | grep kube-apiserver | grep -v tini | grep -- "--tls-cipher-suites="
+        type: manual
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256"
+        remediation: |
+          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
+          on the master node and set the below parameter.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
+        scored: false
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate"
+        audit: ps -ef | grep kube-controller-manager | grep -- "--terminated-pod-gc-threshold=100"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          for example:
+          --terminated-pod-gc-threshold=10
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure controller manager profiling is disabled"
+        audit: ps -ef | grep kube-controller-manager | grep -- "--profiling=false"
+        tests:
+          test_items:
+            - flag: "--profiling=false"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and set the below parameter.
+          --profiling=false
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true"
+        audit: ps -ef | grep kube-controller-manager | grep -- "--use\-service\-account\-credentials=true"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials=true"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node to set the below parameter.
+          --use-service-account-credentials=true
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-controller-manager | grep -- "--service\-account\-private\-key\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/service\-account\-private\-key.pem"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and set the --service-account-private-key-file parameter
+          to the private key file for service accounts.
+          --service-account-private-key-file=
+        scored: false
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate"
+        audit: |
+          ps -ef | grep kube-controller-manager | grep -- "--root\-ca\-file=\/var\/vcap\/jobs\/kube\-controller\-manager\/config\/ca.pem"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and set the --root-ca-file parameter to the certificate bundle file.
+          --root-ca-file=
+        scored: false
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true"
+        audit: |
+          ps -ef | grep kube-controller-manager | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--feature-gates=RotateKubeletServerCertificate=true"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Exception
+          Certificate rotation is handled by Credhub
+        scored: false
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1"
+        audit: |
+          ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-controller-manage[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--bind-address=127.0.0.1"
+        remediation: |
+          Edit the Controller Manager pod specification file controller manager conf
+          on the master node and ensure the correct value for the --bind-address parameter.
+          Exception
+          This setting can be set to the expected value using Kubernetes Profiles. Please follow instructions here
+          https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html
+        scored: false
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false"
+        audit: ps -ef | grep kube-scheduler | grep -v tini | grep -- "--profiling=false"
+        tests:
+          test_items:
+            - flag: "--profiling=false"
+        remediation: |
+          Edit the Scheduler pod specification file scheduler config file
+          on the master node and set the below parameter.
+ --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1" + audit: ps -ef | grep "/var/vcap/packages/kubernetes/bin/kube-schedule[r]" | grep -v tini | grep -- "--bind-address=127.0.0.1" + type: manual + tests: + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + remediation: | + Edit the Scheduler pod specification file scheduler config + on the master node and ensure the correct value for the --bind-address parameter + Exception + This setting can be set to expected value using Kubernetes Profiles. Please follow instructions here + https://docs.pivotal.io/tkgi/1-8/k8s-profiles.html + scored: false diff --git a/cfg/tkgi-1.2.53/node.yaml b/cfg/tkgi-1.2.53/node.yaml new file mode 100644 index 000000000..9fafab944 --- /dev/null +++ b/cfg/tkgi-1.2.53/node.yaml @@ -0,0 +1,418 @@ +--- +controls: +version: "tkgi-1.2.53" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kubelet/monit + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/monit + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /var/vcap/jobs/kubelet/monit + Exception + File is group owned by vcap + scored: true + + - id: 4.1.3 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + scored: true + + - id: 4.1.4 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, chown root:root /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + File is group owned by vcap + scored: false + + - id: 4.1.5 + text: "Ensure that the kubelet.conf file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kube-proxy/config/kubeconfig + type: manual + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chmod 644 /var/vcap/jobs/kube-proxy/config/kubeconfig + Exception + kubeadm is not used to provision/bootstrap the cluster. 
kubeadm and associated config files do not exist on worker + scored: false + + - id: 4.1.6 + text: "Ensure that the kubelet.conf file ownership is set to root:root" + audit: stat -c %U:%G /etc/kubernetes/kubelet.conf + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on the each worker node. + For example, + chown root:root /etc/kubernetes/kubelet.conf + Exception + file ownership is vcap:vcap + scored: false + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 644 + scored: true + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubelet-client-ca.pem + type: manual + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file. + chown root:root + Exception + File is group owned by vcap + scored: false + + - id: 4.1.9 + text: "Ensure that the kubelet --config configuration file has permissions set to 644 or more restrictive" + audit: stat -c permissions=%a /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 644 /var/vcap/jobs/kubelet/config/kubeletconfig.yml + scored: true + + - id: 4.1.10 + text: "Ensure that the kubelet --config configuration file ownership is set to root:root" + audit: stat -c %U:%G /var/vcap/jobs/kubelet/config/kubeletconfig.yml + type: manual + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root /var/vcap/jobs/kubelet/config/kubeletconfig.yml + Exception + File is group owned by vcap + scored: false + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the anonymous-auth argument is set to false" + audit: grep "^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "enabled: false" + remediation: | + If using a Kubelet config file, edit the file to set authentication: anonymous: enabled to + false. + If using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --anonymous-auth=false + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow" + audit: | + grep "^authorization:\n\s{2}mode: AlwaysAllow$" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "AlwaysAllow" + set: false + remediation: | + If using a Kubelet config file, edit the file to set authorization: mode to Webhook. 
If + using executable arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate" + audit: | + grep ^authentication:\n\s{2}anonymous:\n\s{4}enabled:\sfalse\n(\s{2}webhook:\n\s{4}cacheTTL:\s\d+s\n\s{4}enabled:.*\n)? + \s{2}x509:\n\s{4}clientCAFile:\s"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-client-ca\.pem" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "clientCAFile" + remediation: | + If using a Kubelet config file, edit the file to set authentication: x509: clientCAFile to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Ensure that the --read-only-port argument is set to 0" + audit: | + grep "readOnlyPort: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "readOnlyPort: 0" + remediation: | + If using a Kubelet config file, edit the file to set readOnlyPort to 0. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0" + audit: | + grep -- "streamingConnectionIdleTimeout: 0" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "streamingConnectionIdleTimeout: 0" + set: false + remediation: | + If using a Kubelet config file, edit the file to set streamingConnectionIdleTimeout to a + value other than 0. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.6 + text: "Ensure that the --protect-kernel-defaults argument is set to true" + audit: | + grep -- "protectKernelDefaults: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "protectKernelDefaults: true" + remediation: | + If using a Kubelet config file, edit the file to set protectKernelDefaults: true. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --protect-kernel-defaults=true + Based on your system, restart the kubelet service. 
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --make-iptables-util-chains argument is set to true" + audit: | + grep -- "makeIPTablesUtilChains: true" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + test_items: + - flag: "makeIPTablesUtilChains: true" + remediation: | + If using a Kubelet config file, edit the file to set makeIPTablesUtilChains: true. + If using command line arguments, edit the kubelet service file + on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.8 + text: "Ensure that the --hostname-override argument is not set" + audit: | + ps -ef | grep [k]ubelet | grep -- --[c]onfig=/var/vcap/jobs/kubelet/config/kubeletconfig.yml | grep -v -- --hostname-override + type: manual + remediation: | + Edit the kubelet service file + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + Exception + On GCE, the hostname needs to be set to the instance name so the gce cloud provider can manage the instance. + In other cases its set to the IP address of the VM. + scored: false + + - id: 4.2.9 + text: "Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture" + audit: grep -- "--event-qps" /var/vcap/jobs/kubelet/config/kubeletconfig.yml + type: manual + tests: + test_items: + - flag: "--event-qps" + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS: to an appropriate level. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate" + audit: | + grep ^tlsCertFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet\.pem\"\ntlsPrivateKeyFile:\s\"\/var\/vcap\/jobs\/kubelet\/config\/kubelet-key\.pem\"$ + /var/vcap/jobs/kubelet/config/kubeletconfig.yml + tests: + bin_op: and + test_items: + - flag: "tlsCertFile" + - flag: "tlsPrivateKeyFile" + remediation: | + If using a Kubelet config file, edit the file to set tlsCertFile to the location + of the certificate file to use to identify this Kubelet, and tlsPrivateKeyFile + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. 
+          For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Ensure that the --rotate-certificates argument is not set to false"
+        audit: ps -ef | grep kubele[t] | grep -- "--rotate-certificates=false"
+        type: manual
+        tests:
+          test_items:
+            - flag: "--rotate-certificates=false"
+              set: false
+        remediation: |
+          If using a Kubelet config file, edit the file to add the line rotateCertificates: true or
+          remove it altogether to use the default value.
+          If using command line arguments, edit the kubelet service file
+          on each worker node and
+          remove the --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
+          variable.
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          Exception
+          Certificate rotation is handled by Credhub
+        scored: false
+
+      - id: 4.2.12
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true"
+        audit: ps -ef | grep kubele[t] | grep -- "--feature-gates=\(\w\+\|,\)*RotateKubeletServerCertificate=true\(\w\+\|,\)*"
+        type: manual
+        tests:
+          test_items:
+            - flag: "RotateKubeletServerCertificate=true"
+        remediation: |
+          Edit the kubelet service file
+          on each worker node and set the below parameter in the KUBELET_CERTIFICATE_ARGS variable.
+          --feature-gates=RotateKubeletServerCertificate=true
+          Based on your system, restart the kubelet service. For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+          Exception
+          Certificate rotation is handled by Credhub
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers"
+        audit: ps -ef | grep kubele[t] | grep -- "--tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+        type: manual
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              compare:
+                op: regex
+                value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256)
+        remediation: |
+          If using a Kubelet config file, edit the file to set tlsCipherSuites: to
+          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          or to a subset of these values.
+          If using executable arguments, edit the kubelet service file
+          on each worker node and
+          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
+          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          Based on your system, restart the kubelet service.
+          For example:
+          systemctl daemon-reload
+          systemctl restart kubelet.service
+        scored: false
diff --git a/cfg/tkgi-1.2.53/policies.yaml b/cfg/tkgi-1.2.53/policies.yaml
new file mode 100644
index 000000000..ef5f1ada6
--- /dev/null
+++ b/cfg/tkgi-1.2.53/policies.yaml
@@ -0,0 +1,287 @@
+---
+controls:
+version: "tkgi-1.2.53"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to secret objects in the cluster.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used."
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Policies"
+    checks:
+      - id: 5.2.1
+        text: "Minimize the admission of privileged containers"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that
+          the .spec.privileged field is omitted or set to false.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of containers wishing to share the host process ID namespace"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostPID field is omitted or set to false.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host IPC namespace"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostIPC field is omitted or set to false.
+          Exception
+          This is a site-specific setting.
+        scored: false
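The RBAC and PSP checks in this file are all manual, but parts of the review can be scripted. Below is a rough sketch of the wildcard detection behind 5.1.3 over a simplified rule model; the PolicyRule shape mirrors rbac.authorization.k8s.io/v1 but is stubbed here to stay dependency-free, and this is an illustration rather than anything kube-bench ships.

```go
package main

import "fmt"

// PolicyRule is a pared-down stand-in for rbac/v1's PolicyRule,
// just enough to illustrate the wildcard review in 5.1.3.
type PolicyRule struct {
	Verbs     []string
	Resources []string
	APIGroups []string
}

// hasWildcard flags any rule that grants "*" for verbs, resources or groups.
func hasWildcard(rules []PolicyRule) bool {
	for _, r := range rules {
		for _, list := range [][]string{r.Verbs, r.Resources, r.APIGroups} {
			for _, v := range list {
				if v == "*" {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	admin := []PolicyRule{{Verbs: []string{"*"}, Resources: []string{"*"}, APIGroups: []string{"*"}}}
	scoped := []PolicyRule{{Verbs: []string{"get", "list"}, Resources: []string{"pods"}, APIGroups: []string{""}}}
	fmt.Println(hasWildcard(admin))  // true  -> needs review per 5.1.3
	fmt.Println(hasWildcard(scoped)) // false
}
```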
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host network namespace"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.hostNetwork field is omitted or set to false.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers with allowPrivilegeEscalation"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.allowPrivilegeEscalation field is omitted or set to false.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of root containers"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.runAsUser.rule is set to either MustRunAsNonRoot or MustRunAs with the range of
+          UIDs not including 0.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.7
+        text: "Minimize the admission of containers with the NET_RAW capability"
+        type: "manual"
+        remediation: |
+          Create a PSP as described in the Kubernetes documentation, ensuring that the
+          .spec.requiredDropCapabilities is set to include either NET_RAW or ALL.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with added capabilities"
+        type: "manual"
+        remediation: |
+          Ensure that allowedCapabilities is not present in PSPs for the cluster unless
+          it is set to an empty array.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with capabilities assigned"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports Network Policies"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have Network Policies defined"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using secrets as files over secrets as environment variables"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read secrets from mounted secret files, rather than
+          from environment variables.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage"
+        type: "manual"
+        remediation: |
+          Refer to the secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+          Exception
+          This is a site-specific setting.
+        scored: false
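The practical motivation behind 5.4.1 is that a mounted secret file can be re-read (and therefore rotated) at runtime, while an environment variable is fixed at process start and leaks more easily into diagnostics. A minimal illustration of the preferred pattern; the mount path /var/run/secrets/app/token and the APP_TOKEN variable are hypothetical examples, not something TKGI or this benchmark defines.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// readSecret prefers a mounted secret file over an environment variable.
func readSecret() (string, error) {
	if b, err := os.ReadFile("/var/run/secrets/app/token"); err == nil {
		return strings.TrimSpace(string(b)), nil // re-read on use; picks up rotation
	}
	if v := os.Getenv("APP_TOKEN"); v != "" {
		return v, nil // fallback fixed at process start; discouraged by 5.4.1
	}
	return "", fmt.Errorf("secret not provided")
}

func main() {
	s, err := readSecret()
	if err != nil {
		fmt.Println("no secret:", err)
		return
	}
	fmt.Println("secret loaded, length:", len(s))
}
```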
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and set up image provenance.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your pod definitions"
+        type: "manual"
+        remediation: |
+          Seccomp is currently an alpha feature. By default, all alpha features are disabled, so you
+          would need to enable alpha features in the apiserver by passing the
+          "--feature-gates=AllAlpha=true" argument.
+          Edit the /etc/kubernetes/apiserver file on the master node and set the KUBE_API_ARGS
+          parameter to "--feature-gates=AllAlpha=true"
+          KUBE_API_ARGS="--feature-gates=AllAlpha=true"
+          Based on your system, restart the kube-apiserver service. For example:
+          systemctl restart kube-apiserver.service
+          Use annotations to enable the docker/default seccomp profile in your pod definitions. An
+          example is as below:
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            name: trustworthy-pod
+            annotations:
+              seccomp.security.alpha.kubernetes.io/pod: docker/default
+          spec:
+            containers:
+              - name: trustworthy-container
+                image: sotrustworthy:latest
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply Security Context to Your Pods and Containers"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply security contexts to your pods. For a
+          suggested list of security contexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+          Exception
+          This is a site-specific setting.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used"
+        type: "manual"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+          Exception
+          This is a site-specific setting.
+        scored: false
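The check/check.go change in the next hunk makes a missing audit_config file non-fatal whenever another audit source already produced output. Distilled, the intended precedence looks roughly like this; a sketch of the logic for readability, not the literal kube-bench code.

```go
package main

import (
	"fmt"
	"strings"
)

// suppressMissingConfig mimics the new runAuditCommands behaviour:
// a file-not-found error from the audit_config command is dropped when
// the audit or audit_env command already yielded output.
func suppressMissingConfig(err error, auditOut, auditEnvOut string) error {
	if err == nil {
		return nil
	}
	notFound := strings.Contains(err.Error(), "exit status 127") ||
		strings.Contains(err.Error(), "No such file or directory")
	if notFound && (auditOut != "" || auditEnvOut != "") {
		return nil // other audit output exists; ignore the missing config file
	}
	return err
}

func main() {
	err := fmt.Errorf("sh: cat: /test/config.yaml: No such file or directory")
	fmt.Println(suppressMissingConfig(err, "hello", "")) // <nil>: suppressed
	fmt.Println(suppressMissingConfig(err, "", ""))      // kept: the check can still FAIL
}
```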
diff --git a/check/check.go b/check/check.go
index a6ebdb03b..58ce6fb7e 100644
--- a/check/check.go
+++ b/check/check.go
@@ -208,6 +208,16 @@ func (c *Check) runAuditCommands() (lastCommand string, err error) {
 	}
 
 	c.AuditConfigOutput, err = runAudit(c.AuditConfig)
+	// When the config file is not found, the error is usually reported as exit status 127;
+	// in some environments the same condition is reported as exit status 1.
+	if err != nil && (strings.Contains(err.Error(), "exit status 127") ||
+		strings.Contains(err.Error(), "No such file or directory")) &&
+		(c.AuditEnvOutput != "" || c.AuditOutput != "") {
+		// Suppress the file-not-found error when Audit or AuditEnv output is present.
+		glog.V(3).Info(err)
+		err = nil
+		c.AuditConfigOutput = ""
+	}
 	return c.AuditConfig, err
 }
 
@@ -227,9 +237,9 @@ func (c *Check) execute() (finalOutput *testOutput, err error) {
 			t.auditUsed = AuditCommand
 			result := *(t.execute(c.AuditOutput))
 
-			// Check for AuditConfigOutput only if AuditConfig is set
-			if !result.flagFound && c.AuditConfig != "" {
-				//t.isConfigSetting = true
+			// Check for AuditConfigOutput only if AuditConfig is set and auditConfigOutput is not empty
+			if !result.flagFound && c.AuditConfig != "" && c.AuditConfigOutput != "" {
+				// t.isConfigSetting = true
 				t.auditUsed = AuditConfig
 				result = *(t.execute(c.AuditConfigOutput))
 				if !result.flagFound && t.Env != "" {
diff --git a/check/check_test.go b/check/check_test.go
index fd18927bf..124e6f93c 100644
--- a/check/check_test.go
+++ b/check/check_test.go
@@ -69,6 +69,31 @@ func TestCheck_Run(t *testing.T) {
 			},
 			Expected: PASS,
 		},
+		{
+			name: "Scored checks that pass should PASS when config file is not present",
+			check: Check{
+				Scored:      true,
+				Audit:       "echo hello",
+				AuditConfig: "/test/config.yaml",
+				Tests: &tests{TestItems: []*testItem{{
+					Flag: "hello",
+					Set:  true,
+				}}},
+			},
+			Expected: PASS,
+		},
+		{
+			name: "Scored checks should FAIL when config file is not present and there is no audit output",
+			check: Check{
+				Scored:      true,
+				AuditConfig: "/test/config.yaml",
+				Tests: &tests{TestItems: []*testItem{{
+					Flag: "hello",
+					Set:  true,
+				}}},
+			},
+			Expected: FAIL,
+		},
 	}
 
 	for _, testCase := range testCases {
@@ -115,7 +140,6 @@ func TestCheckAuditEnv(t *testing.T) {
 }
 
 func TestCheckAuditConfig(t *testing.T) {
-
 	passingCases := []*Check{
 		controls.Groups[1].Checks[0],
 		controls.Groups[1].Checks[3],
diff --git a/check/controls.go b/check/controls.go
index f84acf7b6..171835058 100644
--- a/check/controls.go
+++ b/check/controls.go
@@ -21,8 +21,8 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/securityhub"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
 	"github.com/golang/glog"
 	"github.com/onsi/ginkgo/reporters"
 	"github.com/spf13/viper"
@@ -206,13 +206,13 @@ func (controls *Controls) JUnit() ([]byte, error) {
 }
 
 // ASFF encodes the results of last run to AWS Security Finding Format(ASFF).
-func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { - fs := []*securityhub.AwsSecurityFinding{} - a, err := getConfig("AWS_ACCOUNT") +func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) { + fs := []types.AwsSecurityFinding{} + account, err := getConfig("AWS_ACCOUNT") if err != nil { return nil, err } - c, err := getConfig("CLUSTER_ARN") + cluster, err := getConfig("CLUSTER_ARN") if err != nil { return nil, err } @@ -220,6 +220,7 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { if err != nil { return nil, err } + nodeName, _ := getConfig("NODE_NAME") arn := fmt.Sprintf(ARN, region) ti := time.Now() @@ -244,47 +245,52 @@ func (controls *Controls) ASFF() ([]*securityhub.AwsSecurityFinding, error) { if len(check.Reason) > 1024 { reason = check.Reason[0:1023] } + id := aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s", arn, account, check.ID, cluster)) + if nodeName != "" { + id = aws.String(fmt.Sprintf("%s%sEKSnodeID+%s+%s+%s", arn, account, check.ID, cluster, nodeName)) + } - f := securityhub.AwsSecurityFinding{ - AwsAccountId: aws.String(a), - Confidence: aws.Int64(100), + f := types.AwsSecurityFinding{ + AwsAccountId: aws.String(account), + Confidence: aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)), - Id: aws.String(fmt.Sprintf("%s%sEKSnodeID+%s%s", arn, a, check.ID, tf)), + Id: id, CreatedAt: aws.String(tf), Description: aws.String(check.Text), ProductArn: aws.String(arn), SchemaVersion: aws.String(SCHEMA), Title: aws.String(fmt.Sprintf("%s %s", check.ID, check.Text)), UpdatedAt: aws.String(tf), - Types: []*string{aws.String(TYPE)}, - Severity: &securityhub.Severity{ - Label: aws.String(securityhub.SeverityLabelHigh), + Types: []string{*aws.String(TYPE)}, + Severity: &types.Severity{ + Label: types.SeverityLabelHigh, }, - Remediation: &securityhub.Remediation{ - Recommendation: &securityhub.Recommendation{ + Remediation: &types.Remediation{ + Recommendation: &types.Recommendation{ Text: aws.String(remediation), }, }, - ProductFields: map[string]*string{ - "Reason": aws.String(reason), - "Actual result": aws.String(actualValue), - "Expected result": aws.String(check.ExpectedResult), - "Section": aws.String(fmt.Sprintf("%s %s", controls.ID, controls.Text)), - "Subsection": aws.String(fmt.Sprintf("%s %s", g.ID, g.Text)), + ProductFields: map[string]string{ + "Reason": reason, + "Actual result": actualValue, + "Expected result": check.ExpectedResult, + "Section": fmt.Sprintf("%s %s", controls.ID, controls.Text), + "Subsection": fmt.Sprintf("%s %s", g.ID, g.Text), }, - Resources: []*securityhub.Resource{ + Resources: []types.Resource{ { - Id: aws.String(c), + Id: aws.String(cluster), Type: aws.String(TYPE), }, }, } - fs = append(fs, &f) + fs = append(fs, f) } } } return fs, nil } + func getConfig(name string) (string, error) { r := viper.GetString(name) if len(r) == 0 { @@ -292,6 +298,7 @@ func getConfig(name string) (string, error) { } return r, nil } + func summarize(controls *Controls, state State) { switch state { case PASS: diff --git a/check/controls_test.go b/check/controls_test.go index a0015facb..c2f6ab36e 100644 --- a/check/controls_test.go +++ b/check/controls_test.go @@ -19,14 +19,13 @@ import ( "encoding/json" "encoding/xml" "fmt" - "io/ioutil" "os" "path/filepath" "reflect" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/onsi/ginkgo/reporters" "github.com/spf13/viper" "github.com/stretchr/testify/assert" @@ -53,7 +52,7 @@ func TestYamlFiles(t *testing.T) { } if !info.IsDir() { t.Logf("reading file: %s", path) - in, err := ioutil.ReadFile(path) + in, err := os.ReadFile(path) if err != nil { t.Fatalf("error opening file %s: %v", path, err) } @@ -374,7 +373,7 @@ func TestControls_ASFF(t *testing.T) { tests := []struct { name string fields fields - want []*securityhub.AwsSecurityFinding + want []types.AwsSecurityFinding wantErr bool }{ { @@ -405,32 +404,32 @@ func TestControls_ASFF(t *testing.T) { }, }, }}, - want: []*securityhub.AwsSecurityFinding{ + want: []types.AwsSecurityFinding{ { AwsAccountId: aws.String("foo account"), - Confidence: aws.Int64(100), + Confidence: aws.Int32(100), GeneratorId: aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", fmt.Sprintf(ARN, "somewhere"), "1", "check1id")), Description: aws.String("check1text"), ProductArn: aws.String(fmt.Sprintf(ARN, "somewhere")), SchemaVersion: aws.String(SCHEMA), Title: aws.String(fmt.Sprintf("%s %s", "check1id", "check1text")), - Types: []*string{aws.String(TYPE)}, - Severity: &securityhub.Severity{ - Label: aws.String(securityhub.SeverityLabelHigh), + Types: []string{*aws.String(TYPE)}, + Severity: &types.Severity{ + Label: types.SeverityLabelHigh, }, - Remediation: &securityhub.Remediation{ - Recommendation: &securityhub.Recommendation{ + Remediation: &types.Remediation{ + Recommendation: &types.Recommendation{ Text: aws.String("fix me"), }, }, - ProductFields: map[string]*string{ - "Reason": aws.String("failed"), - "Actual result": aws.String("failed"), - "Expected result": aws.String("failed"), - "Section": aws.String(fmt.Sprintf("%s %s", "test1", "test runnner")), - "Subsection": aws.String(fmt.Sprintf("%s %s", "g1", "Group text")), + ProductFields: map[string]string{ + "Reason": "failed", + "Actual result": "failed", + "Expected result": "failed", + "Section": fmt.Sprintf("%s %s", "test1", "test runnner"), + "Subsection": fmt.Sprintf("%s %s", "g1", "Group text"), }, - Resources: []*securityhub.Resource{ + Resources: []types.Resource{ { Id: aws.String("foo Cluster"), Type: aws.String(TYPE), diff --git a/check/test.go b/check/test.go index 212ea182e..2aee9b0b0 100644 --- a/check/test.go +++ b/check/test.go @@ -24,7 +24,7 @@ import ( "strings" "github.com/golang/glog" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" "k8s.io/client-go/util/jsonpath" ) @@ -68,9 +68,11 @@ type testItem struct { auditUsed AuditUsed } -type envTestItem testItem -type pathTestItem testItem -type flagTestItem testItem +type ( + envTestItem testItem + pathTestItem testItem + flagTestItem testItem +) type compare struct { Op string @@ -236,16 +238,16 @@ func (t testItem) evaluate(s string) *testOutput { } result.flagFound = match - var isExist = "exists" + isExist := "exists" if !result.flagFound { isExist = "does not exist" } switch t.auditUsed { - case "auditCommand": + case AuditCommand: glog.V(3).Infof("Flag '%s' %s", t.Flag, isExist) - case "auditConfig": + case AuditConfig: glog.V(3).Infof("Path '%s' %s", t.Path, isExist) - case "auditEnv": + case AuditEnv: glog.V(3).Infof("Env '%s' %s", t.Env, isExist) default: glog.V(3).Infof("Error with identify audit used %s", t.auditUsed) @@ -255,7 +257,6 @@ func (t testItem) evaluate(s string) *testOutput { } func compareOp(tCompareOp string, flagVal string, tCompareValue string, flagName string) (string, bool) { - expectedResultPattern := "" 
testResult := false diff --git a/check/test_test.go b/check/test_test.go index 8cbad7210..9da55a491 100644 --- a/check/test_test.go +++ b/check/test_test.go @@ -16,7 +16,6 @@ package check import ( "fmt" - "io/ioutil" "os" "strings" "testing" @@ -29,7 +28,7 @@ var ( func init() { var err error - in, err = ioutil.ReadFile("data") + in, err = os.ReadFile("data") if err != nil { panic("Failed reading test data: " + err.Error()) } @@ -46,7 +45,6 @@ func init() { } func TestTestExecute(t *testing.T) { - cases := []struct { check *Check str string @@ -305,7 +303,6 @@ func TestTestExecute(t *testing.T) { } func TestTestExecuteExceptions(t *testing.T) { - cases := []struct { *Check str string @@ -366,7 +363,8 @@ func TestTestUnmarshal(t *testing.T) { `, kubeletConfig{}, false, - }, { + }, + { ` kind: KubeletConfiguration address: 0.0.0.0 @@ -490,34 +488,42 @@ func TestAllElementsValid(t *testing.T) { }, { source: []string{}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, { source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: true, }, { source: []string{"blah"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, { source: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "blah"}, - target: []string{"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + target: []string{ + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256"}, + "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256", + }, valid: false, }, } @@ -594,494 +600,628 @@ func TestCompareOp(t *testing.T) { // Test Op "eq" {label: "op=eq, both empty", op: "eq", flagVal: "", compareValue: "", expectedResultPattern: "'' is equal to ''", testResult: true, flagName: ""}, - {label: "op=eq, 
true==true", op: "eq", flagVal: "true", + { + label: "op=eq, true==true", op: "eq", flagVal: "true", compareValue: "true", expectedResultPattern: "'parameterTrue' is equal to 'true'", testResult: true, - flagName: "parameterTrue"}, + flagName: "parameterTrue", + }, - {label: "op=eq, false==false", op: "eq", flagVal: "false", + { + label: "op=eq, false==false", op: "eq", flagVal: "false", compareValue: "false", expectedResultPattern: "'parameterFalse' is equal to 'false'", testResult: true, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=eq, false==true", op: "eq", flagVal: "false", + { + label: "op=eq, false==true", op: "eq", flagVal: "false", compareValue: "true", expectedResultPattern: "'parameterFalse' is equal to 'true'", testResult: false, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=eq, strings match", op: "eq", flagVal: "KubeletConfiguration", + { + label: "op=eq, strings match", op: "eq", flagVal: "KubeletConfiguration", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=eq, flagVal=empty", op: "eq", flagVal: "", + { + label: "op=eq, flagVal=empty", op: "eq", flagVal: "", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is equal to 'KubeletConfiguration'", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=eq, compareValue=empty", + { + label: "op=eq, compareValue=empty", op: "eq", flagVal: "KubeletConfiguration", compareValue: "", expectedResultPattern: "'--FlagNameKubeletConf' is equal to ''", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, // Test Op "noteq" - {label: "op=noteq, both empty", + { + label: "op=noteq, both empty", op: "noteq", flagVal: "", compareValue: "", expectedResultPattern: "'parameter' is not equal to ''", testResult: false, - flagName: "parameter"}, + flagName: "parameter", + }, - {label: "op=noteq, true!=true", + { + label: "op=noteq, true!=true", op: "noteq", flagVal: "true", compareValue: "true", expectedResultPattern: "'parameterTrue' is not equal to 'true'", testResult: false, - flagName: "parameterTrue"}, + flagName: "parameterTrue", + }, - {label: "op=noteq, false!=false", + { + label: "op=noteq, false!=false", op: "noteq", flagVal: "false", compareValue: "false", expectedResultPattern: "'parameterFalse' is not equal to 'false'", testResult: false, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=noteq, false!=true", + { + label: "op=noteq, false!=true", op: "noteq", flagVal: "false", compareValue: "true", expectedResultPattern: "'parameterFalse' is not equal to 'true'", testResult: true, - flagName: "parameterFalse"}, + flagName: "parameterFalse", + }, - {label: "op=noteq, strings match", + { + label: "op=noteq, strings match", op: "noteq", flagVal: "KubeletConfiguration", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", testResult: false, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=noteq, flagVal=empty", + { + label: "op=noteq, flagVal=empty", op: "noteq", flagVal: "", compareValue: "KubeletConfiguration", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to 'KubeletConfiguration'", 
testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, - {label: "op=noteq, compareValue=empty", + { + label: "op=noteq, compareValue=empty", op: "noteq", flagVal: "KubeletConfiguration", compareValue: "", expectedResultPattern: "'--FlagNameKubeletConf' is not equal to ''", testResult: true, - flagName: "--FlagNameKubeletConf"}, + flagName: "--FlagNameKubeletConf", + }, // Test Op "gt" - {label: "op=gt, both empty", + { + label: "op=gt, both empty", op: "gt", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 0 > 0", + flagName: "flagName", + }, + { + label: "op=gt, 0 > 0", op: "gt", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is greater than 0", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 4 > 5", + flagName: "flagName", + }, + { + label: "op=gt, 4 > 5", op: "gt", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is greater than 5", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 5 > 4", + flagName: "flagName", + }, + { + label: "op=gt, 5 > 4", op: "gt", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is greater than 4", testResult: true, - flagName: "flagName"}, - {label: "op=gt, 5 > 5", + flagName: "flagName", + }, + { + label: "op=gt, 5 > 5", op: "gt", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is greater than 5", testResult: false, - flagName: "flagName"}, - {label: "op=gt, Pikachu > 5", + flagName: "flagName", + }, + { + label: "op=gt, Pikachu > 5", op: "gt", flagVal: "Pikachu", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Pikachu' '5'", testResult: false, - flagName: "flagName"}, - {label: "op=gt, 5 > Bulbasaur", + flagName: "flagName", + }, + { + label: "op=gt, 5 > Bulbasaur", op: "gt", flagVal: "5", compareValue: "Bulbasaur", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Bulbasaur'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "lt" - {label: "op=lt, both empty", + { + label: "op=lt, both empty", op: "lt", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 0 < 0", + flagName: "flagName", + }, + { + label: "op=lt, 0 < 0", op: "lt", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is lower than 0", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 4 < 5", + flagName: "flagName", + }, + { + label: "op=lt, 4 < 5", op: "lt", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is lower than 5", testResult: true, - flagName: "flagName"}, - {label: "op=lt, 5 < 4", + flagName: "flagName", + }, + { + label: "op=lt, 5 < 4", op: "lt", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is lower than 4", testResult: false, - flagName: "flagName"}, - {label: "op=lt, 5 < 5", + flagName: "flagName", + }, + { + label: "op=lt, 5 < 5", op: "lt", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is lower than 5", testResult: false, - flagName: "flagName"}, - {label: "op=lt, Charmander < 5", + flagName: "flagName", + }, + { + label: "op=lt, Charmander < 5", op: "lt", flagVal: "Charmander", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Charmander' '5'", testResult: false, - flagName: "flagName"}, - {label: 
"op=lt, 5 < Charmeleon", + flagName: "flagName", + }, + { + label: "op=lt, 5 < Charmeleon", op: "lt", flagVal: "5", compareValue: "Charmeleon", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Charmeleon'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "gte" - {label: "op=gte, both empty", + { + label: "op=gte, both empty", op: "gte", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 0 >= 0", + flagName: "flagName", + }, + { + label: "op=gte, 0 >= 0", op: "gte", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is greater or equal to 0", testResult: true, - flagName: "flagName"}, - {label: "op=gte, 4 >= 5", + flagName: "flagName", + }, + { + label: "op=gte, 4 >= 5", op: "gte", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is greater or equal to 5", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 5 >= 4", + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 4", op: "gte", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is greater or equal to 4", testResult: true, - flagName: "flagName"}, - {label: "op=gte, 5 >= 5", + flagName: "flagName", + }, + { + label: "op=gte, 5 >= 5", op: "gte", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is greater or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=gte, Ekans >= 5", + flagName: "flagName", + }, + { + label: "op=gte, Ekans >= 5", op: "gte", flagVal: "Ekans", compareValue: "5", expectedResultPattern: "Invalid Number(s) used for comparison: 'Ekans' '5'", testResult: false, - flagName: "flagName"}, - {label: "op=gte, 4 >= Zubat", + flagName: "flagName", + }, + { + label: "op=gte, 4 >= Zubat", op: "gte", flagVal: "4", compareValue: "Zubat", expectedResultPattern: "Invalid Number(s) used for comparison: '4' 'Zubat'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "lte" - {label: "op=lte, both empty", + { + label: "op=lte, both empty", op: "lte", flagVal: "", compareValue: "", expectedResultPattern: "Invalid Number(s) used for comparison: '' ''", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 0 <= 0", + flagName: "flagName", + }, + { + label: "op=lte, 0 <= 0", op: "lte", flagVal: "0", compareValue: "0", expectedResultPattern: "'flagName' is lower or equal to 0", testResult: true, - flagName: "flagName"}, - {label: "op=lte, 4 <= 5", + flagName: "flagName", + }, + { + label: "op=lte, 4 <= 5", op: "lte", flagVal: "4", compareValue: "5", expectedResultPattern: "'flagName' is lower or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=lte, 5 <= 4", + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 4", op: "lte", flagVal: "5", compareValue: "4", expectedResultPattern: "'flagName' is lower or equal to 4", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 5 <= 5", + flagName: "flagName", + }, + { + label: "op=lte, 5 <= 5", op: "lte", flagVal: "5", compareValue: "5", expectedResultPattern: "'flagName' is lower or equal to 5", testResult: true, - flagName: "flagName"}, - {label: "op=lte, Venomoth <= 4", + flagName: "flagName", + }, + { + label: "op=lte, Venomoth <= 4", op: "lte", flagVal: "Venomoth", compareValue: "4", expectedResultPattern: "Invalid Number(s) used for comparison: 'Venomoth' '4'", testResult: false, - flagName: "flagName"}, - {label: "op=lte, 5 <= Meowth", + flagName: 
"flagName", + }, + { + label: "op=lte, 5 <= Meowth", op: "lte", flagVal: "5", compareValue: "Meowth", expectedResultPattern: "Invalid Number(s) used for comparison: '5' 'Meowth'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "has" - {label: "op=has, both empty", + { + label: "op=has, both empty", op: "has", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' has ''", testResult: true, - flagName: "flagName"}, - {label: "op=has, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=has, flagVal=empty", op: "has", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' has 'blah'", testResult: false, - flagName: "flagName"}, - {label: "op=has, compareValue=empty", + flagName: "flagName", + }, + { + label: "op=has, compareValue=empty", op: "has", flagVal: "blah", compareValue: "", expectedResultPattern: "'flagName-blah' has ''", testResult: true, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'la'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'la'", op: "has", flagVal: "blah", compareValue: "la", expectedResultPattern: "'flagName-blah' has 'la'", testResult: true, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'LA'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'LA'", op: "has", flagVal: "blah", compareValue: "LA", expectedResultPattern: "'flagName-blah' has 'LA'", testResult: false, - flagName: "flagName-blah"}, - {label: "op=has, 'blah' has 'lo'", + flagName: "flagName-blah", + }, + { + label: "op=has, 'blah' has 'lo'", op: "has", flagVal: "blah", compareValue: "lo", expectedResultPattern: "'flagName-blah' has 'lo'", testResult: false, - flagName: "flagName-blah"}, + flagName: "flagName-blah", + }, // Test Op "nothave" - {label: "op=nothave, both empty", + { + label: "op=nothave, both empty", op: "nothave", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' does not have ''", testResult: false, - flagName: "flagName"}, - {label: "op=nothave, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=nothave, flagVal=empty", op: "nothave", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' does not have 'blah'", testResult: true, - flagName: "flagName"}, - {label: "op=nothave, compareValue=empty", + flagName: "flagName", + }, + { + label: "op=nothave, compareValue=empty", op: "nothave", flagVal: "blah", compareValue: "", expectedResultPattern: "'flagName-blah' does not have ''", testResult: false, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'la'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'la'", op: "nothave", flagVal: "blah", compareValue: "la", expectedResultPattern: "'flagName-blah' does not have 'la'", testResult: false, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'LA'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'LA'", op: "nothave", flagVal: "blah", compareValue: "LA", expectedResultPattern: "'flagName-blah' does not have 'LA'", testResult: true, - flagName: "flagName-blah"}, - {label: "op=nothave, 'blah' not have 'lo'", + flagName: "flagName-blah", + }, + { + label: "op=nothave, 'blah' not have 'lo'", op: "nothave", flagVal: "blah", compareValue: "lo", expectedResultPattern: "'flagName-blah' does not have 'lo'", testResult: true, - flagName: "flagName-blah"}, + flagName: "flagName-blah", + }, // Test Op "regex" - {label: "op=regex, both empty", + { + label: "op=regex, both empty", op: 
"regex", flagVal: "", compareValue: "", expectedResultPattern: "'flagName' matched by regex expression ''", testResult: true, - flagName: "flagName"}, - {label: "op=regex, flagVal=empty", + flagName: "flagName", + }, + { + label: "op=regex, flagVal=empty", op: "regex", flagVal: "", compareValue: "blah", expectedResultPattern: "'flagName' matched by regex expression 'blah'", testResult: false, - flagName: "flagName"}, + flagName: "flagName", + }, // Test Op "valid_elements" - {label: "op=valid_elements, valid_elements both empty", + { + label: "op=valid_elements, valid_elements both empty", op: "valid_elements", flagVal: "", compareValue: "", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", testResult: true, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, - {label: "op=valid_elements, valid_elements flagVal empty", + { + label: "op=valid_elements, valid_elements flagVal empty", op: "valid_elements", flagVal: "", compareValue: "a,b", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b'", testResult: false, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, - {label: "op=valid_elements, valid_elements compareValue empty", + { + label: "op=valid_elements, valid_elements compareValue empty", op: "valid_elements", flagVal: "a,b", compareValue: "", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from ''", testResult: false, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements two list equals", + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements two list equals", op: "valid_elements", flagVal: "a,b,c", compareValue: "a,b,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", testResult: true, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements partial flagVal valid", + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements partial flagVal valid", op: "valid_elements", flagVal: "a,c", compareValue: "a,b,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,b,c'", testResult: true, - flagName: "flagWithMultipleElements"}, - {label: "op=valid_elements, valid_elements partial compareValue valid", + flagName: "flagWithMultipleElements", + }, + { + label: "op=valid_elements, valid_elements partial compareValue valid", op: "valid_elements", flagVal: "a,b,c", compareValue: "a,c", expectedResultPattern: "'flagWithMultipleElements' contains valid elements from 'a,c'", testResult: false, - flagName: "flagWithMultipleElements"}, + flagName: "flagWithMultipleElements", + }, // Test Op "bitmask" - {label: "op=bitmask, 644 AND 640", + { + label: "op=bitmask, 644 AND 640", op: "bitmask", flagVal: "640", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission640 has permissions 640, expected 644 or more restrictive", testResult: true, - flagName: "etc/fileExamplePermission640"}, - {label: "op=bitmask, 644 AND 777", + flagName: "etc/fileExamplePermission640", + }, + { + label: "op=bitmask, 644 AND 777", op: "bitmask", flagVal: "777", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission777 has permissions 777, expected 644 or more restrictive", testResult: false, - flagName: "etc/fileExamplePermission777"}, - {label: "op=bitmask, 644 AND 444", + flagName: "etc/fileExamplePermission777", + }, + { + label: 
"op=bitmask, 644 AND 444", op: "bitmask", flagVal: "444", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission444 has permissions 444, expected 644 or more restrictive", testResult: true, - flagName: "etc/fileExamplePermission444"}, - {label: "op=bitmask, 644 AND 211", + flagName: "etc/fileExamplePermission444", + }, + { + label: "op=bitmask, 644 AND 211", op: "bitmask", flagVal: "211", compareValue: "644", expectedResultPattern: "etc/fileExamplePermission211 has permissions 211, expected 644 or more restrictive", testResult: false, - flagName: "etc/fileExamplePermission211"}, - {label: "op=bitmask, Harry AND 211", + flagName: "etc/fileExamplePermission211", + }, + { + label: "op=bitmask, Harry AND 211", op: "bitmask", flagVal: "Harry", compareValue: "644", expectedResultPattern: "Not numeric value - flag: Harry", testResult: false, - flagName: "etc/fileExample"}, - {label: "op=bitmask, 644 AND Potter", + flagName: "etc/fileExample", + }, + { + label: "op=bitmask, 644 AND Potter", op: "bitmask", flagVal: "211", compareValue: "Potter", expectedResultPattern: "Not numeric value - flag: Potter", testResult: false, - flagName: "etc/fileExample"}, + flagName: "etc/fileExample", + }, } for _, c := range cases { @@ -1136,37 +1276,36 @@ func TestToNumeric(t *testing.T) { } func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { - type Resources struct { - Resources []string `json:"resources"` - Providers []map[string]interface{} `json:"providers"` + Resources []string `json:"resources"` + Providers []map[string]interface{} `json:"providers"` } type EncryptionConfig struct { - Kind string `json:"kind"` - ApiVersion string `json:"apiVersion"` - Resources []Resources `json:"resources"` + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Resources []Resources `json:"resources"` } type Key struct { - Secret string `json:"secret"` - Name string `json:"name"` + Secret string `json:"secret"` + Name string `json:"name"` } type Aescbc struct { - Keys []Key `json:"keys"` + Keys []Key `json:"keys"` } type SecretBox struct { - Keys []Key `json:"keys"` + Keys []Key `json:"keys"` } - type Aesgcm struct { - Keys []Key `json:"keys"` + type Aesgcm struct { + Keys []Key `json:"keys"` } // identity disable encryption when set as the first parameter - type Identity struct {} + type Identity struct{} cases := []struct { name string @@ -1179,11 +1318,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1191,11 +1331,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].name}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "name1", false, }, @@ -1203,11 +1344,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) 
{ "JSONPath parse works, results don't match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aesgcm": Aesgcm{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", true, }, @@ -1215,11 +1357,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aesgcm.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aesgcm": Aesgcm{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"aesgcm": Aesgcm{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1227,11 +1370,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].secretbox.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"secretbox": SecretBox{Keys: []Key{Key{Secret: "secret1", Name: "name1"}}}}, - }}}}, + {"secretbox": SecretBox{Keys: []Key{{Secret: "secret1", Name: "name1"}}}}, + }}}, + }, "secret1", false, }, @@ -1239,11 +1383,12 @@ func TestExecuteJSONPathOnEncryptionConfig(t *testing.T) { "JSONPath parse works, results match", "{.resources[*].providers[*].aescbc.keys[*].secret}", EncryptionConfig{ - Kind: "EncryptionConfig", + Kind: "EncryptionConfig", ApiVersion: "v1", Resources: []Resources{{Resources: []string{"secrets"}, Providers: []map[string]interface{}{ - {"aescbc": Aescbc{Keys: []Key{Key{Secret: "secret1", Name: "name1"}, Key{Secret: "secret2", Name: "name2"}}}}, - }}}}, + {"aescbc": Aescbc{Keys: []Key{{Secret: "secret1", Name: "name1"}, {Secret: "secret2", Name: "name2"}}}}, + }}}, + }, "secret1 secret2", false, }, diff --git a/cmd/common.go b/cmd/common.go index 6fb72ae1a..6f83cee4d 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -18,7 +18,6 @@ import ( "bufio" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -47,7 +46,7 @@ func NewRunFilter(opts FilterOpts) (check.Predicate, error) { } return func(g *check.Group, c *check.Check) bool { - var test = true + test := true if len(groupIDs) > 0 { _, ok := groupIDs[g.ID] test = test && ok @@ -71,7 +70,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { os.Exit(1) } - in, err := ioutil.ReadFile(testYamlFile) + in, err := os.ReadFile(testYamlFile) if err != nil { exitWithError(fmt.Errorf("error opening %s test file: %v", testYamlFile, err)) } @@ -87,7 +86,6 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { // Get the set of executables we need for this section of the tests binmap, err := getBinaries(typeConf, nodetype) - // Checks that the executables we need for the section are running. 
if err != nil { glog.V(1).Info(fmt.Sprintf("failed to get a set of executables needed for tests: %v", err)) @@ -97,6 +95,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { svcmap := getFiles(typeConf, "service") kubeconfmap := getFiles(typeConf, "kubeconfig") cafilemap := getFiles(typeConf, "ca") + datadirmap := getFiles(typeConf, "datadir") // Variable substitutions. Replace all occurrences of variables in controls files. s := string(in) @@ -105,6 +104,7 @@ func runChecks(nodetype check.NodeType, testYamlFile, detectedVersion string) { s, _ = makeSubstitutions(s, "svc", svcmap) s, _ = makeSubstitutions(s, "kubeconfig", kubeconfmap) s, _ = makeSubstitutions(s, "cafile", cafilemap) + s, _ = makeSubstitutions(s, "datadir", datadirmap) controls, err := check.NewControls(nodetype, []byte(s), detectedVersion) if err != nil { @@ -134,7 +134,7 @@ func generateDefaultEnvAudit(controls *check.Controls, binSubs []string) { if len(binSubs) == 1 { binPath = binSubs[0] } else { - fmt.Printf("AuditEnv not explicit for check (%s), where bin path cannot be determined\n", checkItem.ID) + glog.V(1).Infof("AuditEnv not explicit for check (%s), where bin path cannot be determined", checkItem.ID) } if test.Env != "" && checkItem.AuditEnv == "" { @@ -148,7 +148,7 @@ func generateDefaultEnvAudit(controls *check.Controls, binSubs []string) { } func parseSkipIds(skipIds string) map[string]bool { - var skipIdMap = make(map[string]bool, 0) + skipIdMap := make(map[string]bool, 0) if skipIds != "" { for _, id := range strings.Split(skipIds, ",") { skipIdMap[strings.Trim(id, " ")] = true diff --git a/cmd/common_test.go b/cmd/common_test.go index 04c8cb35e..53793a000 100644 --- a/cmd/common_test.go +++ b/cmd/common_test.go @@ -18,7 +18,7 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "io" "os" "path" "path/filepath" @@ -52,7 +52,6 @@ func TestParseSkipIds(t *testing.T) { } func TestNewRunFilter(t *testing.T) { - type TestCase struct { Name string FilterOpts FilterOpts @@ -139,7 +138,6 @@ func TestNewRunFilter(t *testing.T) { // then assert.EqualError(t, err, "group option and check option can't be used together") }) - } func TestIsMaster(t *testing.T) { @@ -212,7 +210,6 @@ func TestIsMaster(t *testing.T) { } func TestMapToCISVersion(t *testing.T) { - viperWithData, err := loadConfigForTest() if err != nil { t.Fatalf("Unable to load config file %v", err) @@ -240,6 +237,14 @@ func TestMapToCISVersion(t *testing.T) { {kubeVersion: "1.19", succeed: true, exp: "cis-1.20"}, {kubeVersion: "1.20", succeed: true, exp: "cis-1.20"}, {kubeVersion: "1.21", succeed: true, exp: "cis-1.20"}, + {kubeVersion: "1.22", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.23", succeed: true, exp: "cis-1.23"}, + {kubeVersion: "1.24", succeed: true, exp: "cis-1.24"}, + {kubeVersion: "1.25", succeed: true, exp: "cis-1.7"}, + {kubeVersion: "1.26", succeed: true, exp: "cis-1.8"}, + {kubeVersion: "1.27", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.28", succeed: true, exp: "cis-1.9"}, + {kubeVersion: "1.29", succeed: true, exp: "cis-1.9"}, {kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"}, {kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"}, {kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"}, @@ -443,6 +448,18 @@ func TestValidTargets(t *testing.T) { targets: []string{"node", "policies", "controlplane", "managedservices"}, expected: true, }, + { + name: "eks-1.1.0 valid", + benchmark: "eks-1.1.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + 
expected: true, + }, + { + name: "eks-1.2.0 valid", + benchmark: "eks-1.2.0", + targets: []string{"node", "policies", "controlplane", "managedservices"}, + expected: true, + }, } for _, c := range cases { @@ -667,7 +684,7 @@ func TestPrintSummary(t *testing.T) { os.Stdout = w printSummary(resultTotals, "totals") w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.Contains(t, string(out), "49 checks PASS\n12 checks FAIL\n14 checks WARN\n0 checks INFO\n\n") @@ -686,7 +703,7 @@ func TestPrettyPrintNoSummary(t *testing.T) { noSummary = true prettyPrint(controlsCollection[0], resultTotals) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.NotContains(t, string(out), "49 checks PASS") @@ -705,7 +722,7 @@ func TestPrettyPrintSummary(t *testing.T) { noSummary = false prettyPrint(controlsCollection[0], resultTotals) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.Contains(t, string(out), "49 checks PASS") @@ -723,7 +740,7 @@ func TestWriteStdoutOutputNoTotal(t *testing.T) { noTotals = true writeStdoutOutput(controlsCollection) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout assert.NotContains(t, string(out), "49 checks PASS") @@ -743,7 +760,7 @@ func TestWriteStdoutOutputTotal(t *testing.T) { noTotals = false writeStdoutOutput(controlsCollection) w.Close() - out, _ := ioutil.ReadAll(r) + out, _ := io.ReadAll(r) os.Stdout = rescueStdout @@ -753,7 +770,7 @@ func TestWriteStdoutOutputTotal(t *testing.T) { func parseControlsJsonFile(filepath string) ([]*check.Controls, error) { var result []*check.Controls - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return nil, err } @@ -768,7 +785,7 @@ func parseControlsJsonFile(filepath string) ([]*check.Controls, error) { func parseResultJsonFile(filepath string) (JsonOutputFormat, error) { var result JsonOutputFormat - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return result, err } @@ -783,7 +800,7 @@ func parseResultJsonFile(filepath string) (JsonOutputFormat, error) { func parseResultNoTotalsJsonFile(filepath string) ([]*check.Controls, error) { var result []*check.Controls - d, err := ioutil.ReadFile(filepath) + d, err := os.ReadFile(filepath) if err != nil { return nil, err } @@ -808,7 +825,7 @@ type restoreFn func() func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) { pathenv := os.Getenv("PATH") - tmp, err := ioutil.TempDir("", "TestfakeExecutableInPath") + tmp, err := os.MkdirTemp("", "TestfakeExecutableInPath") if err != nil { return nil, err } @@ -819,7 +836,7 @@ func fakeExecutableInPath(execFile, execCode string) (restoreFn, error) { } if len(execCode) > 0 { - ioutil.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700) + os.WriteFile(filepath.Join(tmp, execFile), []byte(execCode), 0700) } else { f, err := os.OpenFile(execFile, os.O_CREATE|os.O_EXCL, 0700) if err != nil { diff --git a/cmd/database.go b/cmd/database.go index 6732ede64..17d202dd5 100644 --- a/cmd/database.go +++ b/cmd/database.go @@ -38,7 +38,7 @@ func getPsqlConnInfo() (PsqlConnInfo, error) { if value := viper.GetString("PGSQL_DBNAME"); value != "" { dbName = value } else { - return PsqlConnInfo{}, fmt.Errorf("%s_PGSQL_USER env var is required", envVarsPrefix) + return PsqlConnInfo{}, fmt.Errorf("%s_PGSQL_DBNAME env var is required", envVarsPrefix) } var sslMode string diff --git 
a/cmd/kubernetes_version.go b/cmd/kubernetes_version.go index 3f3f7e96f..ae11909af 100644 --- a/cmd/kubernetes_version.go +++ b/cmd/kubernetes_version.go @@ -5,7 +5,7 @@ import ( "encoding/json" "encoding/pem" "fmt" - "io/ioutil" + "io" "net/http" "os" "strings" @@ -45,7 +45,7 @@ func getKubeVersionFromRESTAPI() (*KubeVersion, error) { return nil, err } - tb, err := ioutil.ReadFile(tokenfile) + tb, err := os.ReadFile(tokenfile) if err != nil { glog.V(2).Infof("Failed reading token file Error: %s", err) return nil, err @@ -128,7 +128,6 @@ func getWebData(srvURL, token string, cacert *tls.Certificate) ([]byte, error) { } authToken := fmt.Sprintf("Bearer %s", token) - glog.V(2).Info(fmt.Sprintf("getWebData AUTH TOKEN --[%q]--\n", authToken)) req.Header.Set("Authorization", authToken) resp, err := client.Do(req) @@ -144,11 +143,11 @@ func getWebData(srvURL, token string, cacert *tls.Certificate) ([]byte, error) { return nil, err } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } func loadCertificate(certFile string) (*tls.Certificate, error) { - cacert, err := ioutil.ReadFile(certFile) + cacert, err := os.ReadFile(certFile) if err != nil { return nil, err } diff --git a/cmd/kubernetes_version_test.go b/cmd/kubernetes_version_test.go index b06f79f49..d76fbe2b7 100644 --- a/cmd/kubernetes_version_test.go +++ b/cmd/kubernetes_version_test.go @@ -3,7 +3,6 @@ package cmd import ( "crypto/tls" "fmt" - "io/ioutil" "net/http" "net/http/httptest" "os" @@ -12,13 +11,13 @@ import ( ) func TestLoadCertificate(t *testing.T) { - tmp, err := ioutil.TempDir("", "TestFakeLoadCertificate") + tmp, err := os.MkdirTemp("", "TestFakeLoadCertificate") if err != nil { t.Fatalf("unable to create temp directory: %v", err) } defer os.RemoveAll(tmp) - goodCertFile, _ := ioutil.TempFile(tmp, "good-cert-*") + goodCertFile, _ := os.CreateTemp(tmp, "good-cert-*") _, _ = goodCertFile.Write([]byte(`-----BEGIN CERTIFICATE----- MIICyDCCAbCgAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl cm5ldGVzMB4XDTE5MTEwODAxNDAwMFoXDTI5MTEwNTAxNDAwMFowFTETMBEGA1UE @@ -36,7 +35,7 @@ jLv3UYZRHMpuNS8BJU74BuVzVPHd55RAl+bV8yemdZJ7pPzMvGbZ7zRXWODTDlge CQb9lY+jYErisH8Sq7uABFPvi7RaTh8SS7V7OxqHZvmttNTdZs4TIkk45JK7Y+Xq FAjB57z2NcIgJuVpQnGRYtr/JcH2Qdsq8bLtXaojUIWOOqoTDRLYozdMOOQ= -----END CERTIFICATE-----`)) - badCertFile, _ := ioutil.TempFile(tmp, "bad-cert-*") + badCertFile, _ := os.CreateTemp(tmp, "bad-cert-*") cases := []struct { file string @@ -72,7 +71,6 @@ FAjB57z2NcIgJuVpQnGRYtr/JcH2Qdsq8bLtXaojUIWOOqoTDRLYozdMOOQ= t.Errorf("Expected error") } } - }) } } @@ -124,8 +122,8 @@ func TestGetWebData(t *testing.T) { } }) } - } + func TestGetWebDataWithRetry(t *testing.T) { okfn := func(w http.ResponseWriter, r *http.Request) { _, _ = fmt.Fprintln(w, `{ @@ -173,8 +171,8 @@ func TestGetWebDataWithRetry(t *testing.T) { } }) } - } + func TestExtractVersion(t *testing.T) { okJSON := []byte(`{ "major": "1", @@ -231,7 +229,6 @@ func TestExtractVersion(t *testing.T) { } func TestGetKubernetesURL(t *testing.T) { - resetEnvs := func() { os.Unsetenv("KUBE_BENCH_K8S_ENV") os.Unsetenv("KUBERNETES_SERVICE_HOST") @@ -266,16 +263,9 @@ func TestGetKubernetesURL(t *testing.T) { } k8sURL := getKubernetesURL() - if !c.useDefault { - if k8sURL != c.expected { - t.Errorf("Expected %q but Got %q", k8sURL, c.expected) - } - } else { - if k8sURL != c.expected { - t.Errorf("Expected %q but Got %q", k8sURL, c.expected) - } + if k8sURL != c.expected { + t.Errorf("Expected %q but Got %q", k8sURL, c.expected) } }) } - } diff --git a/cmd/master.go 
b/cmd/master.go deleted file mode 100644 index 99d9cb38a..000000000 --- a/cmd/master.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright © 2017-2019 Aqua Security Software Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "fmt" - - "github.com/aquasecurity/kube-bench/check" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// masterCmd represents the master command -var masterCmd = &cobra.Command{ - Use: "master", - Short: "Run Kubernetes benchmark checks from the master.yaml file.", - Long: `Run Kubernetes benchmark checks from the master.yaml file in cfg/.`, - Run: func(cmd *cobra.Command, args []string) { - bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) - if err != nil { - exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err)) - } - - filename := loadConfig(check.MASTER, bv) - runChecks(check.MASTER, filename, detecetedKubeVersion) - writeOutput(controlsCollection) - }, - Deprecated: "this command will be retired soon. Please use the `run` command with `--targets=master` instead.", -} - -func init() { - masterCmd.PersistentFlags().StringVarP(&masterFile, - "file", - "f", - "/master.yaml", - "Alternative YAML file for master checks", - ) - - RootCmd.AddCommand(masterCmd) -} diff --git a/cmd/node.go b/cmd/node.go deleted file mode 100644 index 5672297d2..000000000 --- a/cmd/node.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright © 2017-2019 Aqua Security Software Ltd. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "fmt" - - "github.com/aquasecurity/kube-bench/check" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -// nodeCmd represents the node command -var nodeCmd = &cobra.Command{ - Use: "node", - Short: "Run Kubernetes benchmark checks from the node.yaml file.", - Long: `Run Kubernetes benchmark checks from the node.yaml file in cfg/.`, - Run: func(cmd *cobra.Command, args []string) { - bv, err := getBenchmarkVersion(kubeVersion, benchmarkVersion, getPlatformInfo(), viper.GetViper()) - if err != nil { - exitWithError(fmt.Errorf("unable to determine benchmark version: %v", err)) - } - - filename := loadConfig(check.NODE, bv) - runChecks(check.NODE, filename, detecetedKubeVersion) - writeOutput(controlsCollection) - }, - Deprecated: "this command will be retired soon. 
Please use the `run` command with `--targets=node` instead.", -} - -func init() { - nodeCmd.PersistentFlags().StringVarP(&nodeFile, - "file", - "f", - "/node.yaml", - "Alternative YAML file for node checks", - ) - - RootCmd.AddCommand(nodeCmd) -} diff --git a/cmd/root.go b/cmd/root.go index 785d021f3..c674aeb1a 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -136,8 +136,7 @@ var RootCmd = &cobra.Command{ } writeOutput(controlsCollection) - exitCode := exitCodeSelection(controlsCollection) - os.Exit(exitCode) + os.Exit(exitCodeSelection(controlsCollection)) }, } @@ -173,7 +172,7 @@ func init() { RootCmd.PersistentFlags().BoolVar(&filterOpts.Unscored, "unscored", true, "Run the unscored CIS checks") RootCmd.PersistentFlags().StringVar(&skipIds, "skip", "", "List of comma separated values of checks to be skipped") RootCmd.PersistentFlags().BoolVar(&includeTestOutput, "include-test-output", false, "Prints the actual result when test fails") - RootCmd.PersistentFlags().StringVar(&outputFile, "outputfile", "", "Writes the JSON results to output file") + RootCmd.PersistentFlags().StringVar(&outputFile, "outputfile", "", "Writes the results to output file when run with --json or --junit") RootCmd.PersistentFlags().StringVarP( &filterOpts.CheckList, @@ -201,7 +200,6 @@ func init() { goflag.CommandLine.VisitAll(func(goflag *goflag.Flag) { RootCmd.PersistentFlags().AddGoFlag(goflag) }) - } // initConfig reads in config file and ENV variables if set. diff --git a/cmd/run.go b/cmd/run.go index db92fe73c..8dd392275 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -54,13 +54,15 @@ var runCmd = &cobra.Command{ path := filepath.Join(cfgDir, bv) err = mergeConfig(path) if err != nil { - fmt.Printf("Error in mergeConfig: %v\n", err) + exitWithError(fmt.Errorf("Error in mergeConfig: %v\n", err)) } err = run(targets, bv) if err != nil { - fmt.Printf("Error in run: %v\n", err) + exitWithError(fmt.Errorf("Error in run: %v\n", err)) } + + os.Exit(exitCodeSelection(controlsCollection)) }, } diff --git a/cmd/run_test.go b/cmd/run_test.go index 2be443f28..7495e5cfb 100644 --- a/cmd/run_test.go +++ b/cmd/run_test.go @@ -1,7 +1,6 @@ package cmd import ( - "io/ioutil" "os" "path/filepath" "testing" @@ -45,7 +44,7 @@ func TestGetTestYamlFiles(t *testing.T) { // Set up temp config directory var err error - cfgDir, err = ioutil.TempDir("", "kube-bench-test") + cfgDir, err = os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -59,7 +58,7 @@ func TestGetTestYamlFiles(t *testing.T) { // We never expect config.yaml to be returned for _, filename := range []string{"one.yaml", "two.yaml", "three.yaml", "config.yaml"} { - err = ioutil.WriteFile(filepath.Join(d, filename), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, filename), []byte("hello world"), 0666) if err != nil { t.Fatalf("error writing temp file %s: %v", filename, err) } diff --git a/cmd/securityHub.go b/cmd/securityHub.go index ae6fd817a..2c9d0a953 100644 --- a/cmd/securityHub.go +++ b/cmd/securityHub.go @@ -1,32 +1,32 @@ package cmd import ( + "context" "fmt" "log" "github.com/aquasecurity/kube-bench/internal/findings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/securityhub" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/securityhub" + "github.com/aws/aws-sdk-go-v2/service/securityhub/types" "github.com/spf13/viper" ) -//REGION ... +// REGION ... 
const REGION = "AWS_REGION" -func writeFinding(in []*securityhub.AwsSecurityFinding) error { +func writeFinding(in []types.AwsSecurityFinding) error { r := viper.GetString(REGION) if len(r) == 0 { return fmt.Errorf("%s not set", REGION) } - sess, err := session.NewSession(&aws.Config{ - Region: aws.String(r)}, - ) + cfg, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(r)) if err != nil { return err } - svc := securityhub.New(sess) - p := findings.New(svc) + + svc := securityhub.NewFromConfig(cfg) + p := findings.New(*svc) out, perr := p.PublishFinding(in) print(out) return perr diff --git a/cmd/util.go b/cmd/util.go index 365d5ef9f..275de2326 100644 --- a/cmd/util.go +++ b/cmd/util.go @@ -1,6 +1,7 @@ package cmd import ( + "context" "encoding/json" "fmt" "os" @@ -14,28 +15,33 @@ import ( "github.com/fatih/color" "github.com/golang/glog" "github.com/spf13/viper" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) +// Print colors +var colors = map[check.State]*color.Color{ + check.PASS: color.New(color.FgGreen), + check.FAIL: color.New(color.FgRed), + check.WARN: color.New(color.FgYellow), + check.INFO: color.New(color.FgBlue), +} + var ( - // Print colors - colors = map[check.State]*color.Color{ - check.PASS: color.New(color.FgGreen), - check.FAIL: color.New(color.FgRed), - check.WARN: color.New(color.FgYellow), - check.INFO: color.New(color.FgBlue), + psFunc func(string) string + statFunc func(string) (os.FileInfo, error) + getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error) + TypeMap = map[string][]string{ + "ca": {"cafile", "defaultcafile"}, + "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, + "service": {"svc", "defaultsvc"}, + "config": {"confs", "defaultconf"}, + "datadir": {"datadirs", "defaultdatadir"}, } ) -var psFunc func(string) string -var statFunc func(string) (os.FileInfo, error) -var getBinariesFunc func(*viper.Viper, check.NodeType) (map[string]string, error) -var TypeMap = map[string][]string{ - "ca": {"cafile", "defaultcafile"}, - "kubeconfig": {"kubeconfig", "defaultkubeconfig"}, - "service": {"svc", "defaultsvc"}, - "config": {"confs", "defaultconf"}, -} - func init() { psFunc = ps statFunc = os.Stat @@ -126,7 +132,7 @@ func getConfigFilePath(benchmarkVersion string, filename string) (path string, e glog.V(2).Info(fmt.Sprintf("Looking for config specific CIS version %q", benchmarkVersion)) path = filepath.Join(cfgDir, benchmarkVersion) - file := filepath.Join(path, string(filename)) + file := filepath.Join(path, filename) glog.V(2).Info(fmt.Sprintf("Looking for file: %s", file)) if _, err := os.Stat(file); err != nil { @@ -208,7 +214,6 @@ func getFiles(v *viper.Viper, fileType string) map[string]string { // verifyBin checks that the binary specified is running func verifyBin(bin string) bool { - // Strip any quotes bin = strings.Trim(bin, "'\"") @@ -241,7 +246,7 @@ func findConfigFile(candidates []string) string { if err == nil { return c } - if !os.IsNotExist(err) { + if !os.IsNotExist(err) && !strings.HasSuffix(err.Error(), "not a directory") { exitWithError(fmt.Errorf("error looking for file %s: %v", c, err)) } } @@ -290,15 +295,35 @@ Alternatively, you can specify the version with --version ` func getKubeVersion() (*KubeVersion, error) { + kubeConfig, err := rest.InClusterConfig() + if err != nil { + glog.V(3).Infof("Error fetching cluster config: %s", err) + } + isRKE := false + if err == nil && kubeConfig != nil { + k8sClient, err := 
kubernetes.NewForConfig(kubeConfig) + if err != nil { + glog.V(3).Infof("Failed to fetch k8sClient object from kube config : %s", err) + } + + if err == nil { + isRKE, err = IsRKE(context.Background(), k8sClient) + if err != nil { + glog.V(3).Infof("Error detecting RKE cluster: %s", err) + } + } + } if k8sVer, err := getKubeVersionFromRESTAPI(); err == nil { glog.V(2).Info(fmt.Sprintf("Kubernetes REST API Reported version: %s", k8sVer)) + if isRKE { + k8sVer.GitVersion = k8sVer.GitVersion + "-rancher1" + } return k8sVer, nil } // These executables might not be on the user's path. - _, err := exec.LookPath("kubectl") - + _, err = exec.LookPath("kubectl") if err != nil { glog.V(3).Infof("Error locating kubectl: %s", err) _, err = exec.LookPath("kubelet") @@ -337,7 +362,6 @@ func getKubeVersionFromKubectl() *KubeVersion { func getKubeVersionFromKubelet() *KubeVersion { cmd := exec.Command("kubelet", "--version") out, err := cmd.CombinedOutput() - if err != nil { glog.V(2).Infof("Failed to query kubelet: %s", err) glog.V(2).Info(err) @@ -401,11 +425,9 @@ func makeSubstitutions(s string, ext string, m map[string]string) (string, []str func isEmpty(str string) bool { return strings.TrimSpace(str) == "" - } func buildComponentMissingErrorMessage(nodetype check.NodeType, component string, bins []string) string { - errMessageTemplate := ` Unable to detect running programs for component %q The following %q programs have been searched, but none of them have been found: @@ -452,7 +474,7 @@ func getPlatformInfo() Platform { } func getPlatformInfoFromVersion(s string) Platform { - versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+-(\w+)(?:[.\-])\w+`) + versionRe := regexp.MustCompile(`v(\d+\.\d+)\.\d+[-+](\w+)(?:[.\-+]*)\w+`) subs := versionRe.FindStringSubmatch(s) if len(subs) < 3 { return Platform{} @@ -467,11 +489,13 @@ func getPlatformBenchmarkVersion(platform Platform) string { glog.V(3).Infof("getPlatformBenchmarkVersion platform: %s", platform) switch platform.Name { case "eks": - return "eks-1.0.1" + return "eks-1.2.0" case "gke": switch platform.Version { case "1.15", "1.16", "1.17", "1.18", "1.19": return "gke-1.0" + case "1.29", "1.30", "1.31": + return "gke-1.6.0" default: return "gke-1.2.0" } @@ -484,6 +508,35 @@ func getPlatformBenchmarkVersion(platform Platform) string { case "4.1": return "rh-1.0" } + case "vmware": + return "tkgi-1.2.53" + case "k3s": + switch platform.Version { + case "1.23": + return "k3s-cis-1.23" + case "1.24": + return "k3s-cis-1.24" + case "1.25", "1.26", "1.27": + return "k3s-cis-1.7" + } + case "rancher": + switch platform.Version { + case "1.23": + return "rke-cis-1.23" + case "1.24": + return "rke-cis-1.24" + case "1.25", "1.26", "1.27": + return "rke-cis-1.7" + } + case "rke2r": + switch platform.Version { + case "1.23": + return "rke2-cis-1.23" + case "1.24": + return "rke2-cis-1.24" + case "1.25", "1.26", "1.27": + return "rke2-cis-1.7" + } } return "" } @@ -538,3 +591,37 @@ func getOcpValidVersion(ocpVer string) (string, error) { glog.V(1).Info(fmt.Sprintf("getOcpBenchmarkVersion unable to find a match for: %q", ocpOriginal)) return "", fmt.Errorf("unable to find a matching Benchmark Version match for ocp version: %s", ocpOriginal) } + +// IsRKE Identifies if the cluster belongs to Rancher Distribution RKE +func IsRKE(ctx context.Context, k8sClient kubernetes.Interface) (bool, error) { + // if there are windows nodes then this should not be counted as rke.linux + windowsNodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{ + Limit: 1, + 
LabelSelector: "kubernetes.io/os=windows", + }) + if err != nil { + return false, err + } + if len(windowsNodes.Items) != 0 { + return false, nil + } + + // Any node created by RKE should have the annotation, so just grab 1 + nodes, err := k8sClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{Limit: 1}) + if err != nil { + return false, err + } + + if len(nodes.Items) == 0 { + return false, nil + } + + annos := nodes.Items[0].Annotations + if _, ok := annos["rke.cattle.io/external-ip"]; ok { + return true, nil + } + if _, ok := annos["rke.cattle.io/internal-ip"]; ok { + return true, nil + } + return false, nil +} diff --git a/cmd/util_test.go b/cmd/util_test.go index 4d88abc87..2c24a7a95 100644 --- a/cmd/util_test.go +++ b/cmd/util_test.go @@ -15,7 +15,7 @@ package cmd import ( - "io/ioutil" + "errors" "os" "path/filepath" "reflect" @@ -29,9 +29,11 @@ import ( "github.com/spf13/viper" ) -var g string -var e []error -var eIndex int +var ( + g string + e []error + eIndex int +) func fakeps(proc string) string { return g @@ -132,7 +134,7 @@ func TestGetBinaries(t *testing.T) { expectErr: false, }, { - // "anotherthing" in list of components but doesn't have a defintion + // "anotherthing" in list of components but doesn't have a definition config: map[string]interface{}{"components": []string{"apiserver", "anotherthing"}, "apiserver": map[string]interface{}{"bins": []string{"apiserver", "kube-apiserver"}}, "thing": map[string]interface{}{"bins": []string{"something else", "thing"}}}, psOut: "kube-apiserver thing", exp: map[string]string{"apiserver": "kube-apiserver"}, @@ -231,6 +233,7 @@ func TestFindConfigFile(t *testing.T) { {input: []string{"myfile"}, statResults: []error{nil}, exp: "myfile"}, {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, nil}, exp: "thatfile"}, {input: []string{"thisfile", "thatfile"}, statResults: []error{os.ErrNotExist, os.ErrNotExist}, exp: ""}, + {input: []string{"thisfile", "/etc/dummy/thatfile"}, statResults: []error{os.ErrNotExist, errors.New("stat /etc/dummy/thatfile: not a directory")}, exp: ""}, } statFunc = fakestat @@ -262,7 +265,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil}, exp: map[string]string{"apiserver": "kube-apiserver"}, }, @@ -271,7 +275,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil, nil}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "/my/file/thing"}, }, @@ -280,7 +285,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}, "defaultconf": "another/thing"}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}, "defaultconf": "another/thing"}, + }, statResults: 
[]error{os.ErrNotExist, nil, os.ErrNotExist}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "another/thing"}, }, @@ -289,7 +295,8 @@ func TestGetConfigFiles(t *testing.T) { config: map[string]interface{}{ "components": []string{"apiserver", "thing"}, "apiserver": map[string]interface{}{"confs": []string{"apiserver", "kube-apiserver"}}, - "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}}, + "thing": map[string]interface{}{"confs": []string{"/my/file/thing"}}, + }, statResults: []error{os.ErrNotExist, nil, os.ErrNotExist}, exp: map[string]string{"apiserver": "kube-apiserver", "thing": "thing"}, }, @@ -389,6 +396,58 @@ func TestGetServiceFiles(t *testing.T) { } } +func TestGetDatadirFiles(t *testing.T) { + var err error + datadir, err := os.MkdirTemp("", "kube-bench-test-etcd-data-dir") + if err != nil { + t.Fatalf("Failed to create temp directory") + } + defer os.RemoveAll(datadir) + + cases := []struct { + config map[string]interface{} + exp map[string]string + statResults []error + }{ + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{datadir}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{nil}, + exp: map[string]string{"etcd": datadir}, + }, + // fallback to defaultdatadir + { + config: map[string]interface{}{ + "components": []string{"etcd"}, + "etcd": map[string]interface{}{"datadirs": []string{"/path/to/etcd/data.etcd"}, + "defaultdatadir": "/var/lib/etcd/default.etcd"}, + }, + statResults: []error{os.ErrNotExist}, + exp: map[string]string{"etcd": "/var/lib/etcd/default.etcd"}, + }, + } + + v := viper.New() + statFunc = fakestat + + for id, c := range cases { + t.Run(strconv.Itoa(id), func(t *testing.T) { + for k, val := range c.config { + v.Set(k, val) + } + e = c.statResults + eIndex = 0 + m := getFiles(v, "datadir") + if !reflect.DeepEqual(m, c.exp) { + t.Fatalf("Got %v\nExpected %v", m, c.exp) + } + }) + } +} + func TestMakeSubsitutions(t *testing.T) { cases := []struct { input string @@ -414,7 +473,7 @@ func TestMakeSubsitutions(t *testing.T) { func TestGetConfigFilePath(t *testing.T) { var err error - cfgDir, err = ioutil.TempDir("", "kube-bench-test") + cfgDir, err = os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -424,7 +483,7 @@ func TestGetConfigFilePath(t *testing.T) { if err != nil { t.Fatalf("Failed to create temp dir") } - err = ioutil.WriteFile(filepath.Join(d, "master.yaml"), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, "master.yaml"), []byte("hello world"), 0666) if err != nil { t.Logf("Failed to create temp file") } @@ -459,7 +518,6 @@ func TestGetConfigFilePath(t *testing.T) { } func TestDecrementVersion(t *testing.T) { - cases := []struct { kubeVersion string succeed bool @@ -486,7 +544,7 @@ func TestDecrementVersion(t *testing.T) { } func TestGetYamlFilesFromDir(t *testing.T) { - cfgDir, err := ioutil.TempDir("", "kube-bench-test") + cfgDir, err := os.MkdirTemp("", "kube-bench-test") if err != nil { t.Fatalf("Failed to create temp directory") } @@ -498,11 +556,11 @@ func TestGetYamlFilesFromDir(t *testing.T) { t.Fatalf("Failed to create temp dir") } - err = ioutil.WriteFile(filepath.Join(d, "something.yaml"), []byte("hello world"), 0666) + err = os.WriteFile(filepath.Join(d, "something.yaml"), []byte("hello world"), 0666) if err != nil { t.Fatalf("error writing file %v", err) } - err = ioutil.WriteFile(filepath.Join(d, "config.yaml"), []byte("hello 
world"), 0666) + err = os.WriteFile(filepath.Join(d, "config.yaml"), []byte("hello world"), 0666) if err != nil { t.Fatalf("error writing file %v", err) } @@ -554,6 +612,21 @@ func Test_getPlatformNameFromKubectlOutput(t *testing.T) { args: args{s: ""}, want: Platform{}, }, + { + name: "k3s", + args: args{s: "v1.27.6+k3s1"}, + want: Platform{Name: "k3s", Version: "1.27"}, + }, + { + name: "rancher1", + args: args{s: "v1.25.13-rancher1-1"}, + want: Platform{Name: "rancher1", Version: "1.25"}, + }, + { + name: "rke2", + args: args{s: "v1.27.6+rke2r1"}, + want: Platform{Name: "rke2r", Version: "1.27"}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -577,7 +650,7 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { args: args{ platform: Platform{Name: "eks"}, }, - want: "eks-1.0.1", + want: "eks-1.2.0", }, { name: "gke 1.19", @@ -635,6 +708,27 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { }, want: "rh-1.0", }, + { + name: "k3s", + args: args{ + platform: Platform{Name: "k3s", Version: "1.27"}, + }, + want: "k3s-cis-1.7", + }, + { + name: "rancher1", + args: args{ + platform: Platform{Name: "rancher", Version: "1.27"}, + }, + want: "rke-cis-1.7", + }, + { + name: "rke2", + args: args{ + platform: Platform{Name: "rke2r", Version: "1.27"}, + }, + want: "rke2-cis-1.7", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -646,7 +740,6 @@ func Test_getPlatformBenchmarkVersion(t *testing.T) { } func Test_getOcpValidVersion(t *testing.T) { - cases := []struct { openShiftVersion string succeed bool diff --git a/cmd/version.go b/cmd/version.go index beb046972..e9b2dcd39 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -2,6 +2,7 @@ package cmd import ( "fmt" + "github.com/spf13/cobra" ) diff --git a/docs/architecture.md b/docs/architecture.md index b2a48f485..c65978f0a 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -13,17 +13,33 @@ Check the contents of the benchmark directory under `cfg` to see which targets a The following table shows the valid targets based on the CIS Benchmark version. 
-| CIS Benchmark | Targets |
-|---|---|
-| cis-1.5| master, controlplane, node, etcd, policies |
-| cis-1.6| master, controlplane, node, etcd, policies |
-|cis-1.20| master, controlplane, node, etcd, policies |
-| gke-1.0| master, controlplane, node, etcd, policies, managedservices |
-| gke-1.2.0| controlplane, node, policies, managedservices |
-| eks-1.0.1| controlplane, node, policies, managedservices |
-| ack-1.0| master, controlplane, node, etcd, policies, managedservices |
-| aks-1.0| controlplane, node, policies, managedservices |
-| rh-0.7| master,node|
-| rh-1.0| master, controlplane, node, etcd, policies |
+| CIS Benchmark        | Targets |
+|----------------------|---------|
+| cis-1.5              | master, controlplane, node, etcd, policies |
+| cis-1.6              | master, controlplane, node, etcd, policies |
+| cis-1.20             | master, controlplane, node, etcd, policies |
+| cis-1.23             | master, controlplane, node, etcd, policies |
+| cis-1.24             | master, controlplane, node, etcd, policies |
+| cis-1.7              | master, controlplane, node, etcd, policies |
+| cis-1.8              | master, controlplane, node, etcd, policies |
+| cis-1.9              | master, controlplane, node, etcd, policies |
+| gke-1.0              | master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0            | controlplane, node, policies, managedservices |
+| gke-1.6.0            | controlplane, node, policies, managedservices |
+| eks-1.0.1            | controlplane, node, policies, managedservices |
+| eks-1.1.0            | controlplane, node, policies, managedservices |
+| eks-1.2.0            | controlplane, node, policies, managedservices |
+| ack-1.0              | master, controlplane, node, etcd, policies, managedservices |
+| aks-1.0              | controlplane, node, policies, managedservices |
+| rh-0.7               | master, node |
+| rh-1.0               | master, controlplane, node, etcd, policies |
+| cis-1.6-k3s          | master, controlplane, node, etcd, policies |
+| cis-1.24-microk8s    | master, controlplane, node, etcd, policies |
+
+The following table shows the valid DISA STIG versions.
+
+| STIG                       | Targets |
+|----------------------------|---------|
+| eks-stig-kubernetes-v1r6   | master, controlplane, node, policies, managedservices |
diff --git a/docs/flags-and-commands.md b/docs/flags-and-commands.md
index 27a254462..1d6605bce 100644
--- a/docs/flags-and-commands.md
+++ b/docs/flags-and-commands.md
@@ -23,12 +23,13 @@ Flag | Description
--noremediations | Disable printing of remediations section to stdout.
--noresults | Disable printing of results section to stdout.
--nototals | Disable calculating and printing of totals for failed, passed, ... checks across all sections
---outputfile | Writes the JSON results to output file
+--outputfile | Writes the results to the output file when run with --json or --junit
--pgsql | Save the results to PostgreSQL
--scored | Run the scored CIS checks (default true)
--skip string | List of comma separated values of checks to be skipped
--stderrthreshold severity | logs at or above this threshold go to stderr (default 2)
-v, --v Level | log level for V logs (default 0)
+--unscored | Run the unscored CIS checks (default true)
--version string | Manually specify Kubernetes version, automatically detected if unset
--vmodule moduleSpec | comma-separated list of pattern=N settings for file-filtered logging
@@ -112,7 +113,8 @@ There are four output states:
- [INFO] is informational output that needs no further action.
Note:
-- If the test is Manual, this always generates WARN (because the user has to run it manually)
+- Some tests with `Automated` in their description must still be run manually
+- If the user has to run a test manually, this always generates WARN
- If the test is Scored, and kube-bench was unable to run the test, this generates FAIL (because the test has not been passed, and as a Scored test, if it doesn't pass then it must be considered a failure).
- If the test is Not Scored, and kube-bench was unable to run the test, this generates WARN.
- If the test is Scored, type is empty, and there are no `test_items` present, it generates a WARN. This is to highlight tests that appear to be incompletely defined.
diff --git a/docs/index.md b/docs/index.md
index 815db57d0..3aa6faaf5 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -2,6 +2,7 @@
[release-img]: https://img.shields.io/github/release/aquasecurity/kube-bench.svg?logo=github
[release]: https://github.com/aquasecurity/kube-bench/releases
[docker-pull]: https://img.shields.io/docker/pulls/aquasec/kube-bench?logo=docker&label=docker%20pulls%20%2F%20kube-bench
+[docker]: https://hub.docker.com/r/aquasec/kube-bench
[cov-img]: https://codecov.io/github/aquasecurity/kube-bench/branch/main/graph/badge.svg
[cov]: https://codecov.io/github/aquasecurity/kube-bench
[report-card-img]: https://goreportcard.com/badge/github.com/aquasecurity/kube-bench
@@ -9,13 +10,11 @@
![Kube-bench Logo](images/kube-bench.jpg)
[![GitHub Release][release-img]][release]
-![Downloads][download]
-![Docker Pulls][docker-pull]
+[![Downloads][download]][release]
+[![Docker Pulls][docker-pull]][docker]
[![Go Report Card][report-card-img]][report-card]
[![Build Status](https://github.com/aquasecurity/kube-bench/workflows/Build/badge.svg?branch=main)](https://github.com/aquasecurity/kube-bench/actions)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/aquasecurity/kube-bench/blob/main/LICENSE)
-[![Docker image](https://images.microbadger.com/badges/image/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench "Get your own image badge on microbadger.com")
-[![Source commit](https://images.microbadger.com/badges/commit/aquasec/kube-bench.svg)](https://microbadger.com/images/aquasec/kube-bench)
[![Coverage Status][cov-img]][cov]
diff --git a/docs/installation.md b/docs/installation.md
index 6d42c0c38..373a7c5fe 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -84,7 +84,7 @@ go build -o kube-bench .
This command copies the kube-bench binary and configuration files to your host from the Docker container: **binaries compiled for linux-x86-64 only (so they won't run on macOS or Windows)**
```
-docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install
+docker run --rm -v `pwd`:/host docker.io/aquasec/kube-bench:latest install
```
You can then run `./kube-bench`.
diff --git a/docs/platforms.md b/docs/platforms.md
index cf0477471..d6fbcf712 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -2,18 +2,35 @@
## CIS Kubernetes Benchmark support
kube-bench supports running tests for Kubernetes.
-Most of our supported benchmarks are defined in the [CIS Kubernetes Benchmarks](https://www.cisecurity.org/benchmark/kubernetes/).
+Most of our supported benchmarks are defined in one of the following:
+ [CIS Kubernetes Benchmarks](https://www.cisecurity.org/benchmark/kubernetes/)
+ [STIG Document Library](https://public.cyber.mil/stigs/downloads)
+ Some defined by other hardening guides.
-| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | -|---|---|---|---| -| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | -| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | -| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.20 | -| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | -| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | -| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | -| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | -| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | -| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | -| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| Source | Kubernetes Benchmark | kube-bench config | Kubernetes versions | +|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------|---------------------| +| CIS | [1.5.1](https://workbench.cisecurity.org/benchmarks/4892) | cis-1.5 | 1.15 | +| CIS | [1.6.0](https://workbench.cisecurity.org/benchmarks/4834) | cis-1.6 | 1.16-1.18 | +| CIS | [1.20](https://workbench.cisecurity.org/benchmarks/6246) | cis-1.20 | 1.19-1.21 | +| CIS | [1.23](https://workbench.cisecurity.org/benchmarks/7532) | cis-1.23 | 1.22-1.23 | +| CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 | +| CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 | +| CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 | +| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 | +| CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE | +| CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE | +| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE | +| CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS | +| CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS | +| CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS | +| CIS | [ACK 1.0.0](https://workbench.cisecurity.org/benchmarks/6467) | ack-1.0 | ACK | +| CIS | [AKS 1.0.0](https://workbench.cisecurity.org/benchmarks/6347) | aks-1.0 | AKS | +| RHEL | RedHat OpenShift hardening guide | rh-0.7 | OCP 3.10-3.11 | +| CIS | [OCP4 1.1.0](https://workbench.cisecurity.org/benchmarks/6778) | rh-1.0 | OCP 4.1- | +| CIS | [1.6.0-k3s](https://docs.rancher.cn/docs/k3s/security/self-assessment/_index) | cis-1.6-k3s | k3s v1.16-v1.24 | +| DISA | [Kubernetes Ver 1, Rel 6](https://dl.dod.cyber.mil/wp-content/uploads/stigs/zip/U_Kubernetes_V1R6_STIG.zip) | eks-stig-kubernetes-v1r6 | EKS | +| CIS | [TKGI 1.2.53](https://network.pivotal.io/products/p-compliance-scanner#/releases/1248397) | tkgi-1.2.53 | vmware | +| CIS | [1.7.0-rke](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/rke1-hardening-guide/rke1-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | rke-cis-1.7 | rke v1.25-v1.27 | +| CIS | 
[1.7.0-rke2](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/rke2-hardening-guide/rke2-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | rke2-cis-1.7 | rke2 v1.25-v1.27 |
+| CIS | [1.7.0-k3s](https://ranchermanager.docs.rancher.com/v2.7/reference-guides/rancher-security/hardening-guides/k3s-hardening-guide/k3s-self-assessment-guide-with-cis-v1.7-k8s-v1.25-v1.26-v1.27) | k3s-cis-1.7 | k3s v1.25-v1.27 |
diff --git a/docs/running.md b/docs/running.md
index 0c7889f04..c482a78b6 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -15,26 +15,26 @@ It is impossible to inspect the master nodes of managed clusters, e.g. GKE, EKS,
You can avoid installing kube-bench on the host by running it inside a container using the host PID namespace and mounting the `/etc` and `/var` directories where the configuration and other files are located on the host so that kube-bench can check their existence and permissions.

```
-docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t aquasec/kube-bench:latest --version 1.18
+docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t docker.io/aquasec/kube-bench:latest --version 1.18
```

> Note: the tests require either the kubelet or kubectl binary in the path in order to auto-detect the Kubernetes version. You can pass `-v $(which kubectl):/usr/local/mount-from-host/bin/kubectl` to resolve this. You will also need to pass in kubeconfig credentials. For example:

```
-docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config -t aquasec/kube-bench:latest
+docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config -t docker.io/aquasec/kube-bench:latest
```

You can use your own configs by mounting them over the default ones in `/opt/kube-bench/cfg/`

```
-docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config aquasec/kube-bench:latest
+docker run --pid=host -v /etc:/etc:ro -v /var:/var:ro -t -v path/to/my-config.yaml:/opt/kube-bench/cfg/config.yaml -v $(which kubectl):/usr/local/mount-from-host/bin/kubectl -v ~/.kube:/.kube -e KUBECONFIG=/.kube/config docker.io/aquasec/kube-bench:latest
```

### Running in a Kubernetes cluster

You can run kube-bench inside a pod, but it will need access to the host's PID namespace in order to check the running processes, as well as access to some directories on the host where config files and other files are stored.

-The supplied `job.yaml` file can be applied to run the tests as a job. For example:
+The `job.yaml` file (available in the root directory of the repository) can be applied to run the tests as a Kubernetes `Job`. For example:

```bash
$ kubectl apply -f job.yaml
@@ -72,12 +72,12 @@ could open nsg 22 port and assign a public ip for one agent node (only for testi
1. Run CIS benchmark to view results:
```
-docker run --rm -v `pwd`:/host aquasec/kube-bench:latest install
+docker run --rm -v `pwd`:/host docker.io/aquasec/kube-bench:latest install
./kube-bench
```
kube-bench cannot be run on AKS master nodes
-### Running in an EKS cluster
+### Running CIS benchmark in an EKS cluster
There is a `job-eks.yaml` file for running the kube-bench node checks on an EKS cluster.
The significant difference on EKS is that it's not possible to schedule jobs onto the master node, so master checks can't be performed @@ -92,7 +92,7 @@ aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutabilit git clone https://github.com/aquasecurity/kube-bench.git cd kube-bench aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com -docker build -t k8s/kube-bench . +make build-docker IMAGE_NAME=k8s/kube-bench docker tag k8s/kube-bench:latest .dkr.ecr..amazonaws.com/k8s/kube-bench:latest docker push .dkr.ecr..amazonaws.com/k8s/kube-bench:latest ``` @@ -103,26 +103,60 @@ docker push .dkr.ecr..amazonaws.com/k8s/kube-bench: 8. Retrieve the value of this Pod and output the report, note the Pod name will vary: `kubectl logs kube-bench-` - You can save the report for later reference: `kubectl logs kube-bench- > kube-bench-report.txt` +### Running DISA STIG in an EKS cluster + +There is a `job-eks-stig.yaml` file for running the kube-bench node checks on an EKS cluster. The significant difference on EKS is that it's not possible to schedule jobs onto the master node, so master checks can't be performed + +1. To create an EKS Cluster refer to [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) in the *Amazon EKS User Guide* + - Information on configuring `eksctl`, `kubectl` and the AWS CLI is within +2. Create an [Amazon Elastic Container Registry (ECR)](https://docs.aws.amazon.com/AmazonECR/latest/userguide/what-is-ecr.html) repository to host the kube-bench container image +``` +aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutability MUTABLE +``` +3. Download, build and push the kube-bench container image to your ECR repo +``` +git clone https://github.com/aquasecurity/kube-bench.git +cd kube-bench +aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com +docker build -t k8s/kube-bench . +docker tag k8s/kube-bench:latest .dkr.ecr..amazonaws.com/k8s/kube-bench:latest +docker push .dkr.ecr..amazonaws.com/k8s/kube-bench:latest +``` +4. Copy the URI of your pushed image, the URI format is like this: `.dkr.ecr..amazonaws.com/k8s/kube-bench:latest` +5. Replace the `image` value in `job-eks-stig.yaml` with the URI from Step 4 +6. Run the kube-bench job on a Pod in your Cluster: `kubectl apply -f job-eks-stig.yaml` +7. Find the Pod that was created, it *should* be in the `default` namespace: `kubectl get pods --all-namespaces` +8. Retrieve the value of this Pod and output the report, note the Pod name will vary: `kubectl logs kube-bench-` + - You can save the report for later reference: `kubectl logs kube-bench- > kube-bench-report.txt` ### Running on OpenShift | OpenShift Hardening Guide | kube-bench config | -|---|---| -| ocp-3.10 +| rh-0.7 | -| ocp-4.1 +| rh-1.0 | +| ------------------------- | ----------------- | +| ocp-3.10 + | rh-0.7 | +| ocp-4.1 + | rh-1.0 | kube-bench includes a set of test files for Red Hat's OpenShift hardening guide for OCP 3.10 and 4.1. To run this you will need to specify `--benchmark rh-07`, or `--version ocp-3.10` or,`--version ocp-4.5` or `--benchmark rh-1.0` `kube-bench` supports auto-detection, when you run the `kube-bench` command it will autodetect if running in openshift environment. 
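+For example, instead of relying on auto-detection you can pin the benchmark explicitly. A minimal sketch (the `run --targets` form is described in the flags documentation; `master,node` here is just one possible subset of the `rh-1.0` targets):
+
+```
+./kube-bench run --targets master,node --benchmark rh-1.0
+```
+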
+Since running `kube-bench` requires elevated privileges, the `privileged` SecurityContextConstraint needs to be applied to the ServiceAccount used for the `Job`:
+
+```
+oc create namespace kube-bench
+oc adm policy add-scc-to-user privileged --serviceaccount default
+oc apply -f job.yaml
+```
+
### Running in a GKE cluster

-| CIS Benchmark | Targets |
-|---|---|
-| gke-1.0| master, controlplane, node, etcd, policies, managedservices |
-| gke-1.2.0| master, controlplane, node, policies, managedservices |
+| CIS Benchmark | Targets                                                      |
+| ------------- | ------------------------------------------------------------ |
+| gke-1.0       | master, controlplane, node, etcd, policies, managedservices |
+| gke-1.2.0     | master, controlplane, node, policies, managedservices |
+| gke-1.6.0     | master, controlplane, node, policies, managedservices |

-kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0` or `--benchmark gke-1.2.0` when you run the `kube-bench` command.
+kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0`, `--benchmark gke-1.2.0` or `--benchmark gke-1.6.0` when you run the `kube-bench` command.

To run the benchmark as a job in your GKE cluster apply the included `job-gke.yaml`.

@@ -132,9 +166,9 @@ kubectl apply -f job-gke.yaml
```

### Running in a ACK cluster

-| CIS Benchmark | Targets |
-|---|---|
-| ack-1.0| master, controlplane, node, etcd, policies, managedservices |
+| CIS Benchmark | Targets                                                      |
+| ------------- | ------------------------------------------------------------ |
+| ack-1.0       | master, controlplane, node, etcd, policies, managedservices |

kube-bench includes benchmarks for Alibaba Cloud Container Service For Kubernetes (ACK). To run this you will need to specify `--benchmark ack-1.0` when you run the `kube-bench` command.

@@ -144,3 +178,45 @@ To run the benchmark as a job in your ACK cluster apply the included `job-ack.ya
```
kubectl apply -f job-ack.yaml
```
+
+### Running in a VMware TKGI cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| tkgi-1.2.53   | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for the VMware TKGI platform.
+To run this you will need to specify `--benchmark tkgi-1.2.53` when you run the `kube-bench` command.
+
+To run the benchmark as a job in your VMware TKGI cluster apply the included `job-tkgi.yaml`.
+
+```
+kubectl apply -f job-tkgi.yaml
+```
+
+### Running in a Rancher RKE cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| rke-cis-1.7   | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for the Rancher RKE platform.
+To run this you will need to specify `--benchmark rke-cis-1.7` when you run the `kube-bench` command.
+
+### Running in a Rancher RKE2 cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| rke2-cis-1.7  | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for the Rancher RKE2 platform.
+To run this you will need to specify `--benchmark rke2-cis-1.7` when you run the `kube-bench` command.
+
+### Running in a Rancher K3s cluster
+
+| CIS Benchmark | Targets                                    |
+|---------------|--------------------------------------------|
+| k3s-cis-1.7   | master, etcd, controlplane, node, policies |
+
+kube-bench includes benchmarks for the Rancher K3s platform.
+To run this you will need to specify `--benchmark k3s-cis-1.7` when you run the `kube-bench` command. diff --git a/fipsonly.go b/fipsonly.go new file mode 100644 index 000000000..807bdf301 --- /dev/null +++ b/fipsonly.go @@ -0,0 +1,7 @@ +//go:build fipsonly + +package main + +import ( + _ "crypto/tls/fipsonly" +) diff --git a/go.mod b/go.mod index e318d34e2..c2c74e679 100644 --- a/go.mod +++ b/go.mod @@ -1,19 +1,102 @@ module github.com/aquasecurity/kube-bench -go 1.16 +go 1.22.0 + +toolchain go1.22.7 require ( - github.com/aws/aws-sdk-go v1.42.14 - github.com/fatih/color v1.13.0 - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/magiconair/properties v1.8.5 + github.com/aws/aws-sdk-go-v2 v1.31.0 + github.com/aws/aws-sdk-go-v2/config v1.27.37 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 + github.com/fatih/color v1.16.0 + github.com/golang/glog v1.2.2 + github.com/magiconair/properties v1.8.7 github.com/onsi/ginkgo v1.16.5 github.com/pkg/errors v0.9.1 - github.com/spf13/cobra v1.3.0 - github.com/spf13/viper v1.10.0 - github.com/stretchr/testify v1.7.0 + github.com/spf13/cobra v1.8.0 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v2 v2.4.0 - gorm.io/driver/postgres v1.2.2 - gorm.io/gorm v1.22.3 - k8s.io/client-go v0.23.1 + gorm.io/driver/postgres v1.5.9 + gorm.io/gorm v1.25.12 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 +) + +require ( + github.com/aws/aws-sdk-go-v2/credentials v1.17.35 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 // indirect + github.com/aws/smithy-go v1.21.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgx/v5 v5.5.5 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + 
github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.31.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9f8ef6d77..aaed1a6bf 100644 --- a/go.sum +++ b/go.sum @@ -1,1011 +1,304 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod 
h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod 
h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.42.14 h1:Eqwjl/cwRY9z0TTU4RGpiElWo9oPzG+Y8r5Thu6Ug5A= -github.com/aws/aws-sdk-go v1.42.14/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod 
h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= -github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/aws/aws-sdk-go-v2 v1.31.0 h1:3V05LbxTSItI5kUqNwhJrrrY1BAXxXt0sN0l72QmG5U= +github.com/aws/aws-sdk-go-v2 v1.31.0/go.mod h1:ztolYtaEUtdpf9Wftr31CJfLVjOnD/CVRkKOOYgF8hA= +github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU= +github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18 h1:kYQ3H1u0ANr9KEKlGs/jTLrBFPo8P8NaH/w7A01NeeM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.18/go.mod h1:r506HmK5JDUh9+Mw4CfGJGSSoqIiLCndAuqXuhbv67Y= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18 h1:Z7IdFUONvTcvS7YuhtVxN99v2cCoHRXOS4mTr0B/pUc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.18/go.mod h1:DkKMmksZVVyat+Y+r1dEOgJEfUeA7UngIHWeKsi0yNc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= 
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3 h1:YSmEnPSWj74eOtbXG4Z2J+GTQjBrz7w2wP01isHFZwU= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.53.3/go.mod h1:QFtYEC35t39ftJ6emZgapzdtBjGZsuR4bAd73SiG23I= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= +github.com/aws/smithy-go v1.21.0 h1:H7L8dtDRk0P1Qm6y0ji7MCYMQObJ5R9CRpyPhRUkLYA= +github.com/aws/smithy-go v1.21.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod 
h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= 
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod 
h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
-github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
+github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
-github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
-github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
-github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
-github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
-github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
-github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
-github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
-github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
-github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
-github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgconn v1.10.0 h1:4EYhlDVEMsJ30nNj0mmgwIUXoq7e9sMJrVC2ED6QlCU=
-github.com/jackc/pgconn v1.10.0/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
-github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
-github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
-github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
-github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
-github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
-github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
-github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
-github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
-github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
-github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgproto3/v2 v2.1.1 h1:7PQ/4gLoqnl87ZxL7xjO0DR5gYuviDCZxQJsUlFW1eI=
-github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
-github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
-github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
-github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
-github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
-github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
-github.com/jackc/pgtype v1.8.1 h1:9k0IXtdJXHJbyAWQgbWr1lU+MEhPXZz6RIXxfR5oxXs=
-github.com/jackc/pgtype v1.8.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
-github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
-github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
-github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
-github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
-github.com/jackc/pgx/v4 v4.13.0 h1:JCjhT5vmhMAf/YwBHLvrBn4OGdIQBiFG6ym8Zmdx570=
-github.com/jackc/pgx/v4 v4.13.0/go.mod h1:9P4X524sErlaxj0XSGZk7s+LD0eOyu1ZDUrrpznYDF0=
-github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
-github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
+github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
+github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
+github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
-github.com/jinzhu/now v1.1.2 h1:eVKgfIdy9b6zbWBMgFpfDPoAMifwSZagU9HmEU6zgiI=
-github.com/jinzhu/now v1.1.2/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
-github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
-github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
-github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
-github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
-github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
-github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
-github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
+github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
-github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
-github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk=
-github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
+github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
-go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
+go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
-golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
-google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
-google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
-google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
-google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
-google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
-google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
-google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
-google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
-google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
-google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
-google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
-google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
-google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
-google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.2.2 h1:Ka9W6feOU+rPM9m007eYLMD4QoZuYGBnQ3Jp0faGSwg= -gorm.io/driver/postgres v1.2.2/go.mod h1:Ik3tK+a3FMp8ORZl29v4b3M0RsgXsaeMXh9s9eVMXco= -gorm.io/gorm v1.22.2/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= -gorm.io/gorm v1.22.3 h1:/JS6z+GStEQvJNW3t1FTwJwG/gZ+A7crFdRqtvG5ehA= -gorm.io/gorm v1.22.3/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools 
v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= -k8s.io/client-go v0.23.1 h1:Ma4Fhf/p07Nmj9yAB1H7UwbFHEBrSPg8lviR24U2GiQ= -k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= +gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= +gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= +gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod 
h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/kind-stig.test.yaml b/hack/kind-stig.test.yaml new file mode 100644 index 000000000..5051277c7 --- /dev/null +++ b/hack/kind-stig.test.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + metadata: + labels: + app: kube-bench + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:latest + command: [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-etcd + mountPath: /var/lib/etcd + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. + # You can omit this mount if you specify --version as part of the command. + - name: usr-bin + mountPath: /usr/local/mount-from-host/bin + - name: kind-bin + mountPath: /kind/bin + restartPolicy: Never + volumes: + - name: var-lib-etcd + hostPath: + path: "/var/lib/etcd" + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" + - name: usr-bin + hostPath: + path: "/usr/bin" + - name: kind-bin + hostPath: + path: "/kind/bin" diff --git a/hack/kind-stig.yaml b/hack/kind-stig.yaml new file mode 100644 index 000000000..3b1ab69d5 --- /dev/null +++ b/hack/kind-stig.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + metadata: + labels: + app: kube-bench + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:${VERSION} + command: [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-etcd + mountPath: /var/lib/etcd + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + - name: etc-systemd + mountPath: /etc/systemd + - name: etc-kubernetes + mountPath: /etc/kubernetes + # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version. + # You can omit this mount if you specify --version as part of the command. 
+            - name: usr-bin
+              mountPath: /usr/local/mount-from-host/bin
+            - name: kind-bin
+              mountPath: /kind/bin
+      restartPolicy: Never
+      volumes:
+        - name: var-lib-etcd
+          hostPath:
+            path: "/var/lib/etcd"
+        - name: var-lib-kubelet
+          hostPath:
+            path: "/var/lib/kubelet"
+        - name: etc-systemd
+          hostPath:
+            path: "/etc/systemd"
+        - name: etc-kubernetes
+          hostPath:
+            path: "/etc/kubernetes"
+        - name: usr-bin
+          hostPath:
+            path: "/usr/bin"
+        - name: kind-bin
+          hostPath:
+            path: "/kind/bin"
diff --git a/hack/kind.yaml b/hack/kind.yaml
index 0cf3888d4..7f088f150 100644
--- a/hack/kind.yaml
+++ b/hack/kind.yaml
@@ -12,7 +12,7 @@ spec:
       hostPID: true
       containers:
         - name: kube-bench
-          image: aquasec/kube-bench:${VERSION}
+          image: docker.io/aquasec/kube-bench:${VERSION}
          command: ["kube-bench"]
           volumeMounts:
             - name: var-lib-etcd
diff --git a/integration/testdata/Expected_output.data b/integration/testdata/Expected_output.data
index 1dd154e0d..934f90ec6 100644
--- a/integration/testdata/Expected_output.data
+++ b/integration/testdata/Expected_output.data
@@ -221,8 +221,8 @@ minimum.
 [INFO] 4.1 Worker Node Configuration Files
 [PASS] 4.1.1 Ensure that the kubelet service file permissions are set to 644 or more restrictive (Automated)
 [PASS] 4.1.2 Ensure that the kubelet service file ownership is set to root:root (Automated)
-[PASS] 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)
-[PASS] 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)
+[WARN] 4.1.3 If proxy kubeconfig file exists ensure permissions are set to 644 or more restrictive (Manual)
+[WARN] 4.1.4 If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)
 [PASS] 4.1.5 Ensure that the --kubeconfig kubelet.conf file permissions are set to 644 or more restrictive (Automated)
 [PASS] 4.1.6 Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)
 [PASS] 4.1.7 Ensure that the certificate authorities file permissions are set to 644 or more restrictive (Manual)
@@ -240,11 +240,18 @@ minimum.
 [PASS] 4.2.8 Ensure that the --hostname-override argument is not set (Manual)
 [WARN] 4.2.9 Ensure that the --event-qps argument is set to 0 or a level which ensures appropriate event capture (Manual)
 [WARN] 4.2.10 Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)
-[PASS] 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Manual)
+[PASS] 4.2.11 Ensure that the --rotate-certificates argument is not set to false (Automated)
 [PASS] 4.2.12 Verify that the RotateKubeletServerCertificate argument is set to true (Manual)
 [WARN] 4.2.13 Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)
 
 == Remediations node ==
+4.1.3 Run the below command (based on the file location on your system) on the each worker node.
+For example,
+chmod 644 /etc/kubernetes/proxy.conf
+
+4.1.4 Run the below command (based on the file location on your system) on the each worker node.
+For example, chown root:root /etc/kubernetes/proxy.conf
+
 4.2.6 If using a Kubelet config file, edit the file to set protectKernelDefaults: true.
 If using command line arguments, edit the kubelet service file
 /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
@@ -274,7 +281,7 @@ Based on your system, restart the kubelet service.
 For example:
 systemctl daemon-reload
 systemctl restart kubelet.service
-4.2.13 If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+4.2.13 If using a Kubelet config file, edit the file to set tlsCipherSuites: to
 TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
 or to a subset of these values.
 If using executable arguments, edit the kubelet service file
@@ -287,9 +294,9 @@ systemctl restart kubelet.service
 
 == Summary node ==
-19 checks PASS
+17 checks PASS
 1 checks FAIL
-3 checks WARN
+5 checks WARN
 0 checks INFO
 
 [INFO] 5 Kubernetes Policies
@@ -419,8 +426,8 @@ resources and that all new resources are created in a specific namespace.
 0 checks INFO
 
 == Summary total ==
-69 checks PASS
+67 checks PASS
 11 checks FAIL
-43 checks WARN
+45 checks WARN
 0 checks INFO
diff --git a/integration/testdata/Expected_output_stig.data b/integration/testdata/Expected_output_stig.data
new file mode 100644
index 000000000..167dd030c
--- /dev/null
+++ b/integration/testdata/Expected_output_stig.data
@@ -0,0 +1,266 @@
+[INFO] 1 Control Plane Components
+
+== Summary master ==
+0 checks PASS
+0 checks FAIL
+0 checks WARN
+0 checks INFO
+
+[INFO] 2 Control Plane Configuration
+[INFO] 2.1 DISA Category Code I
+[FAIL] V-242390 The Kubernetes API server must have anonymous authentication disabled (Automated)
+[FAIL] V-242400 The Kubernetes API server must have Alpha APIs disabled (Automated)
+[INFO] 2.2 DISA Category Code II
+[WARN] V-242381 The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)
+[WARN] V-242402 The Kubernetes API Server must have an audit log path set (Manual)
+[WARN] V-242403 Kubernetes API Server must generate audit records (Manual)
+[WARN] V-242461 Kubernetes API Server audit logs must be enabled. (Manual)
+[WARN] V-242462 The Kubernetes API Server must be set to audit log max size. (Manual)
+[WARN] V-242463 The Kubernetes API Server must be set to audit log maximum backup. (Manual)
+[WARN] V-242464 The Kubernetes API Server audit log retention must be set. (Manual)
+[WARN] V-242465 The Kubernetes API Server audit log path must be set. (Manual)
+[WARN] V-242443 Kubernetes must contain the latest updates as authorized by IAVMs, CTOs, DTMs, and STIGs. (Manual)
+
+== Remediations controlplane ==
+V-242390 If using a Kubelet config file, edit $kubeletconf to set authentication: anonymous: enabled to
+false.
+If using executable arguments, edit the kubelet service file
+$kubeletsvc on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+--anonymous-auth=false
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+V-242400 Edit any manifest files or $kubeletconf that contain the feature-gates
+setting with AllAlpha set to "true".
+Set the flag to "false" or remove the "AllAlpha" setting
+completely. Restart the kubelet service if the kubelet config file
+if the kubelet config file is changed.
+
+V-242381 Create explicit service accounts wherever a Kubernetes workload requires specific access
+to the Kubernetes API server.
+Modify the configuration of each default service account to include this value
+automountServiceAccountToken: false
+
+V-242402 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242403 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242461 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242462 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242463 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242464 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242465 Enable control plane logging for API Server, Audit, Authenticator, Controller Manager, and Scheduler.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html
+
+V-242443 Upgrade Kubernetes to a supported version.
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html
+
+
+== Summary controlplane ==
+0 checks PASS
+2 checks FAIL
+9 checks WARN
+0 checks INFO
+
+[INFO] 3 Worker Node Security Configuration
+[INFO] 3.1 DISA Category Code I
+[WARN] V-242387 The Kubernetes Kubelet must have the read-only port flag disabled (Manual)
+[PASS] V-242391 The Kubernetes Kubelet must have anonymous authentication disabled (Automated)
+[PASS] V-242392 The Kubernetes kubelet must enable explicit authorization (Automated)
+[FAIL] V-242397 The Kubernetes kubelet static PodPath must not enable static pods (Automated)
+[WARN] V-242415 Secrets in Kubernetes must not be stored as environment variables.(Manual)
+[FAIL] V-242434 Kubernetes Kubelet must enable kernel protection (Automated)
+[PASS] V-242435 Kubernetes must prevent non-privileged users from executing privileged functions (Automated)
+[FAIL] V-242393 Kubernetes Worker Nodes must not have sshd service running. (Automated)
+[FAIL] V-242394 Kubernetes Worker Nodes must not have the sshd service enabled. (Automated)
+[WARN] V-242395 Kubernetes dashboard must not be enabled. (Manual)
+[PASS] V-242398 Kubernetes DynamicAuditing must not be enabled. (Automated)
+[PASS] V-242399 Kubernetes DynamicKubeletConfig must not be enabled. (Automated)
+[PASS] V-242404 Kubernetes Kubelet must deny hostname override (Automated)
+[PASS] V-242406 The Kubernetes kubelet configuration file must be owned by root (Automated)
+[PASS] V-242407 The Kubernetes kubelet configuration files must have file permissions set to 644 or more restrictive (Automated)
+[WARN] V-242414 The Kubernetes cluster must use non-privileged host ports for user pods. (Manual)
+[WARN] V-242442 Kubernetes must remove old components after updated versions have been installed. (Manual)
+[WARN] V-242396 Kubernetes Kubectl cp command must give expected access and results. (Manual)
+
+== Remediations node ==
+V-242387 If using a Kubelet config file, edit /var/lib/kubelet/config.yaml to set readOnlyPort to 0.
+If using command line arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+--read-only-port=0
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+V-242397 Edit /var/lib/kubelet/config.yaml on each node to to remove the staticPodPath
+Based on your system, restart the kubelet service. For example,
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+V-242415 Run the following command:
+kubectl get all -o jsonpath='{range .items[?(@..secretKeyRef)]} {.kind} {.metadata.name} {"\n"}{end}' -A
+If any of the values returned reference environment variables
+rewrite application code to read secrets from mounted secret files, rather than
+from environment variables.
+
+V-242434 If using a Kubelet config file, edit /var/lib/kubelet/config.yaml to set protectKernelDefaults: true.
+If using command line arguments, edit the kubelet service file
+/etc/systemd/system/kubelet.service.d/10-kubeadm.conf on each worker node and
+set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
+--protect-kernel-defaults=true
+Based on your system, restart the kubelet service. For example:
+systemctl daemon-reload
+systemctl restart kubelet.service
+
+V-242393 To stop the sshd service, run the command: systemctl stop sshd
+
+V-242394 To disable the sshd service, run the command:
+ chkconfig sshd off
+
+V-242395 Run the command: kubectl get pods --all-namespaces -l k8s-app=kubernetes-dashboard
+If any resources are returned, this is a finding.
+Fix Text: Delete the Kubernetes dashboard deployment with the following command:
+ kubectl delete deployment kubernetes-dashboard --namespace=kube-system
+
+V-242414 For any of the pods that are using ports below 1024,
+reconfigure the pod to use a service to map a host non-privileged
+port to the pod port or reconfigure the image to use non-privileged ports.
+
+V-242442 To view all pods and the images used to create the pods, from the Master node, run the following command:
+ kubectl get pods --all-namespaces -o jsonpath="{..image}" | \
+ tr -s '[[:space:]]' '\n' | \
+ sort | \
+ uniq -c
+ Review the images used for pods running within Kubernetes.
+ Remove any old pods that are using older images.
+
+V-242396 If any Worker nodes are not using kubectl version 1.12.9 or newer, this is a finding.
+Upgrade the Master and Worker nodes to the latest version of kubectl.
+
+
+== Summary node ==
+8 checks PASS
+4 checks FAIL
+6 checks WARN
+0 checks INFO
+
+[INFO] 4 Policies
+[INFO] 4.1 Policies - DISA Category Code I
+[WARN] V-242381 The Kubernetes Controller Manager must create unique service accounts for each work payload. (Manual)
+[WARN] V-242383 User-managed resources must be created in dedicated namespaces. (Manual)
+[WARN] V-242417 Kubernetes must separate user functionality. (Manual)
+
+== Remediations policies ==
+V-242381 Create explicit service accounts wherever a Kubernetes workload requires specific access
+to the Kubernetes API server.
+Modify the configuration of each default service account to include this value
+automountServiceAccountToken: false
+
+V-242383 Move any user-managed resources from the default, kube-public and kube-node-lease namespaces, to user namespaces.
+
+V-242417 Move any user pods that are present in the Kubernetes system namespaces to user specific namespaces.
+
+
+== Summary policies ==
+0 checks PASS
+0 checks FAIL
+3 checks WARN
+0 checks INFO
+
+[INFO] 5 Managed Services
+[INFO] 5.1 DISA Category Code I
+[INFO] V-242386 The Kubernetes API server must have the insecure port flag disabled | Component of EKS Control Plane
+[INFO] V-242388 The Kubernetes API server must have the insecure bind address not set | Component of EKS Control Plane
+[WARN] V-242436 The Kubernetes API server must have the ValidatingAdmissionWebhook enabled (manual)
+[INFO] V-245542 Kubernetes API Server must disable basic authentication to protect information in transit | Component of EKS Control Plane
+[INFO] 5.2 DISA Category Code II
+[INFO] V-242376 The Kubernetes Controller Manager must use TLS 1.2, at a minimum | Component of EKS Control Plane
+[INFO] V-242377 The Kubernetes Scheduler must use TLS 1.2, at a minimum | Component of EKS Control Plane
+[INFO] V-242378 The Kubernetes API Server must use TLS 1.2, at a minimum | Component of EKS Control Plane
+[INFO] V-242379 The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane
+[INFO] V-242380 The Kubernetes etcd must use TLS to protect the confidentiality of sensitive data during electronic dissemination | Component of EKS Control Plane
+[INFO] V-242382 The Kubernetes API Server must enable Node,RBAC as the authorization mode | Component of EKS Control Plane
+[INFO] V-242384 The Kubernetes Scheduler must have secure binding | Component of EKS Control Plane
+[INFO] V-242385 The Kubernetes Controller Manager must have secure binding | Component of EKS Control Plane
+[INFO] V-242389 The Kubernetes API server must have the secure port set | Component of EKS Control Plane
+[INFO] V-242401 The Kubernetes API Server must have an audit policy set | Component of EKS Control Plane
+[INFO] V-242402 The Kubernetes API Server must have an audit log path set | Component of EKS Control Plane
+[INFO] V-242403 Kubernetes API Server must generate audit records | Component of EKS Control Plane
+[INFO] V-242405 The Kubernetes manifests must be owned by root | Component of EKS Control Plane
+[INFO] V-242408 The Kubernetes manifests must have least privileges | Component of EKS Control Plane
+[INFO] V-242409 Kubernetes Controller Manager must disable profiling | Component of EKS Control Plane
+[INFO] V-242410 The Kubernetes API Server must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane
+[INFO] V-242411 The Kubernetes Scheduler must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane
+[INFO] V-242412 The Kubernetes Controllers must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane
+[INFO] V-242413 The Kubernetes etcd must enforce PPS that adhere to PPSM CAL | Component of EKS Control Plane
+[INFO] V-242418 The Kubernetes API server must use approved cipher suites | Component of EKS Control Plane
+[INFO] V-242419 Kubernetes API Server must have the SSL Certificate Authority set | Component of EKS Control Plane
+[INFO] V-242420 Kubernetes Kubelet must have the SSL Certificate Authority set | Component of EKS Control Plane
+[INFO] V-242421 Kubernetes Controller Manager must have the SSL Certificate Authority set | Component of EKS Control Plane
+[INFO] V-242422 Kubernetes API Server must have a certificate for communication | Component of EKS Control Plane
+[INFO] V-242423 Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane
+[INFO] V-242424 Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane
+[INFO] V-242425 Kubernetes Kubelet must enable tls-cert-file for client authentication to secure service | Component of EKS Control Plane
+[INFO] V-242426 Kubernetes etcd must enable client authentication to secure service | Component of EKS Control Plane
+[INFO] V-242427 Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane
+[INFO] V-242428 Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane
+[INFO] V-242429 Kubernetes etcd must have the SSL Certificate Authority set | Component of EKS Control Plane
+[INFO] V-242430 Kubernetes etcd must have a certificate for communication | Component of EKS Control Plane
+[INFO] V-242431 Kubernetes etcd must have a key file for secure communication | Component of EKS Control Plane
+[INFO] V-242432 Kubernetes etcd must have peer-cert-file set for secure communication | Component of EKS Control Plane
+[INFO] V-242433 Kubernetes etcd must have a peer-key-file set for secure communication | Component of EKS Control Plane
+[INFO] V-242438 Kubernetes API Server must configure timeouts to limit attack surface | Component of EKS Control Plane
+[INFO] V-242444 The Kubernetes component manifests must be owned by root | Component of EKS Control Plane
+[INFO] V-242445 The Kubernetes component etcd must be owned by etcd | Component of EKS Control Plane
+[INFO] V-242446 The Kubernetes conf files must be owned by root | Component of EKS Control Plane
+[INFO] V-242447 The Kubernetes Kube Proxy must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242448 The Kubernetes Kube Proxy must be owned by root | Component of EKS Control Plane
+[INFO] V-242449 The Kubernetes Kubelet certificate authority file must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242450 The Kubernetes Kubelet certificate authority must be owned by root | Component of EKS Control Plane
+[INFO] V-242451 The Kubernetes component PKI must be owned by root | Component of EKS Control Plane
+[INFO] V-242452 The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242453 The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane
+[INFO] V-242454 The Kubernetes kubeadm.conf must be owned by root | Component of EKS Control Plane
+[INFO] V-242455 The Kubernetes kubeadm.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242456 The Kubernetes kubelet config must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242457 The Kubernetes kubelet config must be owned by root | Component of EKS Control Plane
+[INFO] V-242458 The Kubernetes API Server must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242459 The Kubernetes etcd must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242460 The Kubernetes admin.conf must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242466 The Kubernetes PKI CRT must have file permissions set to 644 or more restrictive | Component of EKS Control Plane
+[INFO] V-242467 The Kubernetes PKI keys must have file permissions set to 600 or more restrictive | Component of EKS Control Plane
+[INFO] V-242468 The Kubernetes API Server must prohibit communication using TLS version 1.0 and 1.1, and SSL 2.0 and 3.0 | Component of EKS Control Plane
+[INFO] V-245541 Kubernetes Kubelet must not disable timeouts | Component of EKS Control Plane
+[INFO] V-245543 Kubernetes API Server must disable token authentication to protect information in transit | Component of EKS Control Plane
+[INFO] V-245544 Kubernetes endpoints must use approved organizational certificate and key pair to protect information in transit | Component of EKS Control Plane
+
+== Remediations managedservices ==
+V-242436 Amazon EKS version 1.18 and later automatically enable ValidatingAdmissionWebhook
+Ref: https://docs.aws.amazon.com/eks/latest/userguide/platform-versions.html
+
+
+== Summary managedservices ==
+0 checks PASS
+0 checks FAIL
+1 checks WARN
+62 checks INFO
+
+== Summary total ==
+8 checks PASS
+6 checks FAIL
+19 checks WARN
+62 checks INFO
+
diff --git a/internal/findings/publisher.go b/internal/findings/publisher.go
index 4661cc254..4f960afdb 100644
--- a/internal/findings/publisher.go
+++ b/internal/findings/publisher.go
@@ -1,14 +1,16 @@
 package findings
 
 import (
-	"github.com/aws/aws-sdk-go/service/securityhub"
-	"github.com/aws/aws-sdk-go/service/securityhub/securityhubiface"
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/service/securityhub"
+	"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
 	"github.com/pkg/errors"
 )
 
-// A Publisher represents an object that publishes finds to AWS Security Hub.
+// A Publisher represents an object that publishes findings to AWS Security Hub.
 type Publisher struct {
-	client securityhubiface.SecurityHubAPI // AWS Security Hub Service Client
+	client securityhub.Client // AWS Security Hub Service Client
 }
 
 // A PublisherOutput represents an object that contains information about the service call.
@@ -16,26 +18,26 @@ type PublisherOutput struct {
 	// The number of findings that failed to import.
 	//
 	// FailedCount is a required field
-	FailedCount int64
+	FailedCount int32
 
 	// The list of findings that failed to import.
-	FailedFindings []*securityhub.ImportFindingsError
+	FailedFindings []types.ImportFindingsError
 
 	// The number of findings that were successfully imported.
 	//
 	// SuccessCount is a required field
-	SuccessCount int64
+	SuccessCount int32
 }
 
 // New creates a new Publisher.
-func New(client securityhubiface.SecurityHubAPI) *Publisher {
+func New(client securityhub.Client) *Publisher {
 	return &Publisher{
 		client: client,
 	}
 }
 
 // PublishFinding publishes findings to AWS Security Hub Service
-func (p *Publisher) PublishFinding(finding []*securityhub.AwsSecurityFinding) (*PublisherOutput, error) {
+func (p *Publisher) PublishFinding(finding []types.AwsSecurityFinding) (*PublisherOutput, error) {
 	o := PublisherOutput{}
 	i := securityhub.BatchImportFindingsInput{}
 	i.Findings = finding
@@ -45,24 +47,20 @@ func (p *Publisher) PublishFinding(finding []*securityhub.AwsSecurityFinding) (*
 	batch := 100
 
 	for i := 0; i < len(finding); i += batch {
-		j := i + batch
-		if j > len(finding) {
-			j = len(finding)
-		}
-		i := securityhub.BatchImportFindingsInput{}
-		i.Findings = finding
-		r, err := p.client.BatchImportFindings(&i) // Process the batch.
+		input := securityhub.BatchImportFindingsInput{}
+		input.Findings = finding[i:min(i+batch, len(finding))] // Submit only this batch, not the whole slice.
+		r, err := p.client.BatchImportFindings(context.Background(), &input) // Process the batch.
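+		// AWS Security Hub's BatchImportFindings API accepts at most 100 findings
+		// per request, which is why `batch` is set to 100 and the input is sliced;
+		// per-call results are then aggregated below.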
if err != nil { errs = errors.Wrap(err, "finding publish failed") } - if r.FailedCount != nil { - o.FailedCount += *r.FailedCount - } - if r.SuccessCount != nil { - o.SuccessCount += *r.SuccessCount - } - for _, ff := range r.FailedFindings { - o.FailedFindings = append(o.FailedFindings, ff) + if r != nil { + if *r.FailedCount != 0 { + o.FailedCount += *r.FailedCount + } + if *r.SuccessCount != 0 { + o.SuccessCount += *r.SuccessCount + } + o.FailedFindings = append(o.FailedFindings, r.FailedFindings...) } } return &o, errs diff --git a/internal/findings/publisher_test.go b/internal/findings/publisher_test.go deleted file mode 100644 index 2a5381c2a..000000000 --- a/internal/findings/publisher_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package findings - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/securityhub" - "github.com/aws/aws-sdk-go/service/securityhub/securityhubiface" -) - -// Define a mock struct to be used in your unit tests of myFunc. -type MockSHClient struct { - securityhubiface.SecurityHubAPI - Batches int - NumberOfFinding int -} - -func NewMockSHClient() *MockSHClient { - return &MockSHClient{} -} - -func (m *MockSHClient) BatchImportFindings(input *securityhub.BatchImportFindingsInput) (*securityhub.BatchImportFindingsOutput, error) { - o := securityhub.BatchImportFindingsOutput{} - m.Batches++ - m.NumberOfFinding = len(input.Findings) - return &o, nil -} - -func TestPublisher_publishFinding(t *testing.T) { - type fields struct { - client *MockSHClient - } - type args struct { - finding []*securityhub.AwsSecurityFinding - } - tests := []struct { - name string - fields fields - args args - wantBatchCount int - wantFindingCount int - }{ - {"Test single finding", fields{NewMockSHClient()}, args{makeFindings(1)}, 1, 1}, - {"Test 150 finding should return 2 batches", fields{NewMockSHClient()}, args{makeFindings(150)}, 2, 150}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - p := New(tt.fields.client) - p.PublishFinding(tt.args.finding) - if tt.fields.client.NumberOfFinding != tt.wantFindingCount { - t.Errorf("Publisher.publishFinding() want = %v, got %v", tt.wantFindingCount, tt.fields.client.NumberOfFinding) - } - if tt.fields.client.Batches != tt.wantBatchCount { - t.Errorf("Publisher.publishFinding() want = %v, got %v", tt.wantBatchCount, tt.fields.client.Batches) - } - }) - } -} - -func makeFindings(count int) []*securityhub.AwsSecurityFinding { - var findings []*securityhub.AwsSecurityFinding - - for i := 0; i < count; i++ { - t := securityhub.AwsSecurityFinding{} - findings = append(findings, &t) - - } - return findings -} diff --git a/job-ack.yaml b/job-ack.yaml index ecc18199c..f34f6dd04 100644 --- a/job-ack.yaml +++ b/job-ack.yaml @@ -9,8 +9,16 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "ack-1.0"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies,managedservices", + "--benchmark", + "ack-1.0", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-aks.yaml b/job-aks.yaml index 329c86b2c..652a4a5ef 100644 --- a/job-aks.yaml +++ b/job-aks.yaml @@ -9,8 +9,9 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "aks-1.0"] + image: docker.io/aquasec/kube-bench:latest + command: + ["kube-bench", 
"run", "--targets", "node", "--benchmark", "aks-1.0"] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-eks-asff.yaml b/job-eks-asff.yaml index 426c5484a..7c16bba56 100644 --- a/job-eks-asff.yaml +++ b/job-eks-asff.yaml @@ -32,8 +32,22 @@ spec: - name: kube-bench # Push the image to your ECR and then refer to it here # image: - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0.1", "--asff"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node", + "--benchmark", + "eks-1.2.0", + "--asff", + ] + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet @@ -45,7 +59,7 @@ spec: mountPath: /etc/kubernetes readOnly: true - name: kube-bench-eks-config - mountPath: "/opt/kube-bench/cfg/eks-1.0.1/config.yaml" + mountPath: "/opt/kube-bench/cfg/eks-1.2.0/config.yaml" subPath: config.yaml readOnly: true restartPolicy: Never diff --git a/job-eks-stig.yaml b/job-eks-stig.yaml new file mode 100644 index 000000000..65ce5dc9a --- /dev/null +++ b/job-eks-stig.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + # Push the image to your ECR and then refer to it here + # image: + image: docker.io/aquasec/kube-bench:latest + # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead + command: + [ + "kube-bench", + "run", + "--benchmark", + "eks-stig-kubernetes-v1r6", + ] + volumeMounts: + - name: var-lib-kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: etc-systemd + mountPath: /etc/systemd + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-lib-kubelet + hostPath: + path: "/var/lib/kubelet" + - name: etc-systemd + hostPath: + path: "/etc/systemd" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" diff --git a/job-eks.yaml b/job-eks.yaml index ed269e06a..beaf39194 100644 --- a/job-eks.yaml +++ b/job-eks.yaml @@ -11,9 +11,17 @@ spec: - name: kube-bench # Push the image to your ECR and then refer to it here # image: - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest # To send findings to AWS Security Hub, refer to `job-eks-asff.yaml` instead - command: ["kube-bench", "run", "--targets", "node", "--benchmark", "eks-1.0.1"] + command: + [ + "kube-bench", + "run", + "--targets", + "node", + "--benchmark", + "eks-1.2.0", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-gke.yaml b/job-gke.yaml index 2e6d1e956..c87b18492 100644 --- a/job-gke.yaml +++ b/job-gke.yaml @@ -9,8 +9,16 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node,policies,managedservices", "--benchmark", "gke-1.2.0"] + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies,managedservices", + "--benchmark", + "gke-1.2.0", + ] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-iks.yaml b/job-iks.yaml index a67ad407f..f8092d094 100644 --- a/job-iks.yaml +++ b/job-iks.yaml @@ -9,8 +9,9 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest - command: ["kube-bench", "run", "--targets", "node", "--version", 
"1.20"] + image: docker.io/aquasec/kube-bench:latest + command: + ["kube-bench", "run", "--targets", "node", "--version", "1.20"] volumeMounts: - name: var-lib-kubelet mountPath: /var/lib/kubelet diff --git a/job-master.yaml b/job-master.yaml index e3be12b58..4df64d3dc 100644 --- a/job-master.yaml +++ b/job-master.yaml @@ -7,17 +7,31 @@ spec: template: spec: hostPID: true - nodeSelector: - node-role.kubernetes.io/master: "" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists tolerations: - key: node-role.kubernetes.io/master operator: Exists effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule containers: - name: kube-bench - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "master"] volumeMounts: + - name: var-lib-cni + mountPath: /var/lib/cni + readOnly: true - name: var-lib-etcd mountPath: /var/lib/etcd readOnly: true @@ -61,6 +75,9 @@ spec: readOnly: true restartPolicy: Never volumes: + - name: var-lib-cni + hostPath: + path: "/var/lib/cni" - name: var-lib-etcd hostPath: path: "/var/lib/etcd" diff --git a/job-node.yaml b/job-node.yaml index b45231775..1aa7023df 100644 --- a/job-node.yaml +++ b/job-node.yaml @@ -9,9 +9,12 @@ spec: hostPID: true containers: - name: kube-bench - image: aquasec/kube-bench:latest + image: docker.io/aquasec/kube-bench:latest command: ["kube-bench", "run", "--targets", "node"] volumeMounts: + - name: var-lib-cni + mountPath: /var/lib/cni + readOnly: true - name: var-lib-etcd mountPath: /var/lib/etcd readOnly: true @@ -49,6 +52,9 @@ spec: readOnly: true restartPolicy: Never volumes: + - name: var-lib-cni + hostPath: + path: "/var/lib/cni" - name: var-lib-etcd hostPath: path: "/var/lib/etcd" diff --git a/job-tkgi.yaml b/job-tkgi.yaml new file mode 100644 index 000000000..3ac760c9f --- /dev/null +++ b/job-tkgi.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: kube-bench +spec: + template: + spec: + hostPID: true + containers: + - name: kube-bench + image: docker.io/aquasec/kube-bench:latest + command: + [ + "kube-bench", + "run", + "--targets", + "node,policies", + "--benchmark", + "tkgi-1.2.53", + ] + volumeMounts: + - name: var-vcap-jobs + mountPath: /var/vcap/jobs + readOnly: true + - name: var-vcap-packages + mountPath: /var/vcap/packages + readOnly: true + - name: var-vcap-store-etcd + mountPath: /var/vcap/store/etcd + readOnly: true + - name: var-vcap-sys + mountPath: /var/vcap/sys + readOnly: true + - name: etc-kubernetes + mountPath: /etc/kubernetes + readOnly: true + restartPolicy: Never + volumes: + - name: var-vcap-jobs + hostPath: + path: "/var/vcap/jobs" + - name: var-vcap-packages + hostPath: + path: "/var/vcap/packages" + - name: var-vcap-store-etcd + hostPath: + path: "/var/vcap/store/etcd" + - name: var-vcap-sys + hostPath: + path: "/var/vcap/sys" + - name: etc-kubernetes + hostPath: + path: "/etc/kubernetes" diff --git a/job.yaml b/job.yaml index 739fb9b78..1a22637ff 100644 --- a/job.yaml +++ b/job.yaml @@ -9,79 +9,83 @@ spec: labels: app: kube-bench spec: - hostPID: true containers: - - name: kube-bench - image: aquasec/kube-bench:0.6.3 - command: ["kube-bench"] + - command: ["kube-bench"] + image: docker.io/aquasec/kube-bench:v0.9.1 + name: kube-bench volumeMounts: - - name: 
diff --git a/job.yaml b/job.yaml
index 739fb9b78..1a22637ff 100644
--- a/job.yaml
+++ b/job.yaml
@@ -9,79 +9,83 @@ spec:
       labels:
         app: kube-bench
     spec:
-      hostPID: true
       containers:
-        - name: kube-bench
-          image: aquasec/kube-bench:0.6.3
-          command: ["kube-bench"]
+        - command: ["kube-bench"]
+          image: docker.io/aquasec/kube-bench:v0.9.1
+          name: kube-bench
           volumeMounts:
-            - name: var-lib-etcd
-              mountPath: /var/lib/etcd
+            - name: var-lib-cni
+              mountPath: /var/lib/cni
+              readOnly: true
+            - mountPath: /var/lib/etcd
+              name: var-lib-etcd
               readOnly: true
-            - name: var-lib-kubelet
-              mountPath: /var/lib/kubelet
+            - mountPath: /var/lib/kubelet
+              name: var-lib-kubelet
               readOnly: true
-            - name: var-lib-kube-scheduler
-              mountPath: /var/lib/kube-scheduler
+            - mountPath: /var/lib/kube-scheduler
+              name: var-lib-kube-scheduler
               readOnly: true
-            - name: var-lib-kube-controller-manager
-              mountPath: /var/lib/kube-controller-manager
+            - mountPath: /var/lib/kube-controller-manager
+              name: var-lib-kube-controller-manager
               readOnly: true
-            - name: etc-systemd
-              mountPath: /etc/systemd
+            - mountPath: /etc/systemd
+              name: etc-systemd
               readOnly: true
-            - name: lib-systemd
-              mountPath: /lib/systemd/
+            - mountPath: /lib/systemd/
+              name: lib-systemd
               readOnly: true
-            - name: srv-kubernetes
-              mountPath: /srv/kubernetes/
+            - mountPath: /srv/kubernetes/
+              name: srv-kubernetes
               readOnly: true
-            - name: etc-kubernetes
-              mountPath: /etc/kubernetes
+            - mountPath: /etc/kubernetes
+              name: etc-kubernetes
               readOnly: true
-            # /usr/local/mount-from-host/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
-            # You can omit this mount if you specify --version as part of the command.
-            - name: usr-bin
-              mountPath: /usr/local/mount-from-host/bin
+            - mountPath: /usr/local/mount-from-host/bin
+              name: usr-bin
               readOnly: true
-            - name: etc-cni-netd
-              mountPath: /etc/cni/net.d/
+            - mountPath: /etc/cni/net.d/
+              name: etc-cni-netd
               readOnly: true
-            - name: opt-cni-bin
-              mountPath: /opt/cni/bin/
+            - mountPath: /opt/cni/bin/
+              name: opt-cni-bin
               readOnly: true
+      hostPID: true
       restartPolicy: Never
       volumes:
-        - name: var-lib-etcd
-          hostPath:
-            path: "/var/lib/etcd"
-        - name: var-lib-kubelet
-          hostPath:
-            path: "/var/lib/kubelet"
-        - name: var-lib-kube-scheduler
-          hostPath:
-            path: "/var/lib/kube-scheduler"
-        - name: var-lib-kube-controller-manager
-          hostPath:
-            path: "/var/lib/kube-controller-manager"
-        - name: etc-systemd
-          hostPath:
-            path: "/etc/systemd"
-        - name: lib-systemd
-          hostPath:
-            path: "/lib/systemd"
-        - name: srv-kubernetes
-          hostPath:
-            path: "/srv/kubernetes"
-        - name: etc-kubernetes
-          hostPath:
-            path: "/etc/kubernetes"
-        - name: usr-bin
-          hostPath:
-            path: "/usr/bin"
-        - name: etc-cni-netd
-          hostPath:
-            path: "/etc/cni/net.d/"
-        - name: opt-cni-bin
+        - name: var-lib-cni
           hostPath:
-            path: "/opt/cni/bin/"
+            path: /var/lib/cni
+        - hostPath:
+            path: /var/lib/etcd
+          name: var-lib-etcd
+        - hostPath:
+            path: /var/lib/kubelet
+          name: var-lib-kubelet
+        - hostPath:
+            path: /var/lib/kube-scheduler
+          name: var-lib-kube-scheduler
+        - hostPath:
+            path: /var/lib/kube-controller-manager
+          name: var-lib-kube-controller-manager
+        - hostPath:
+            path: /etc/systemd
+          name: etc-systemd
+        - hostPath:
+            path: /lib/systemd
+          name: lib-systemd
+        - hostPath:
+            path: /srv/kubernetes
+          name: srv-kubernetes
+        - hostPath:
+            path: /etc/kubernetes
+          name: etc-kubernetes
+        - hostPath:
+            path: /usr/bin
+          name: usr-bin
+        - hostPath:
+            path: /etc/cni/net.d/
+          name: etc-cni-netd
+        - hostPath:
+            path: /opt/cni/bin/
+          name: opt-cni-bin
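
Note on job.yaml above: most of the churn is mechanical key reordering
(command/image/name and hostPath/name now sorted, presumably by a YAML
formatter); the substantive changes are the extra read-only /var/lib/cni
mount, matching job-master.yaml and job-node.yaml, and the image bump from
the stale 0.6.3 pin to v0.9.1. A usage sketch, not part of the patch, for
running the generic job end to end:

    $ kubectl apply -f job.yaml
    $ kubectl wait --for=condition=complete job.batch/kube-bench --timeout=120s
    $ kubectl logs job/kube-bench > kube-bench-report.txt
    $ kubectl delete -f job.yaml    # finished Jobs are not garbage-collected by default
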
diff --git a/makefile b/makefile
index 5945e1a47..bcab5c3fa 100644
--- a/makefile
+++ b/makefile
@@ -4,12 +4,15 @@ DOCKER_ORG ?= aquasec
 VERSION ?= $(shell git rev-parse --short=7 HEAD)
 KUBEBENCH_VERSION ?= $(shell git describe --tags --abbrev=0)
 IMAGE_NAME ?= $(DOCKER_ORG)/$(BINARY):$(VERSION)
+IMAGE_NAME_UBI ?= $(DOCKER_ORG)/$(BINARY):$(VERSION)-ubi
 GOOS ?= linux
 BUILD_OS := linux
 uname := $(shell uname -s)
-BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm
+BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x
 DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity
 GOARCH ?= $@
+KUBECTL_VERSION ?= 1.31.0
+ARCH ?= $(shell go env GOARCH)

 ifneq ($(findstring Microsoft,$(shell uname -r)),)
 	BUILD_OS := windows
@@ -38,12 +41,25 @@ build: $(BINARY)
 $(BINARY): $(SOURCES)
 	GOOS=$(GOOS) CGO_ENABLED=0 go build -ldflags "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion=$(KUBEBENCH_VERSION)" -o $(BINARY) .

+build-fips:
+	GOOS=$(GOOS) CGO_ENABLED=0 GOEXPERIMENT=boringcrypto go build -tags fipsonly -ldflags "-X github.com/aquasecurity/kube-bench/cmd.KubeBenchVersion=$(KUBEBENCH_VERSION)" -o $(BINARY) .
+
 # builds the current dev docker version
 build-docker:
 	docker build --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \
-	--build-arg VCS_REF=$(VERSION) \
-	--build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \
-	-t $(IMAGE_NAME) .
+		--build-arg VCS_REF=$(VERSION) \
+		--build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \
+		--build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \
+		--build-arg TARGETARCH=$(ARCH) \
+		-t $(IMAGE_NAME) .
+
+build-docker-ubi:
+	docker build -f Dockerfile.ubi --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \
+		--build-arg VCS_REF=$(VERSION) \
+		--build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \
+		--build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \
+		--build-arg TARGETARCH=$(ARCH) \
+		-t $(IMAGE_NAME_UBI) .

 # unit tests
 tests:
@@ -78,3 +94,15 @@ kind-run: kind-push
 	kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \
 	kubectl logs job/kube-bench > ./test.data && \
 	diff ./test.data integration/testdata/Expected_output.data
+
+kind-run-stig: KUBECONFIG = "./kubeconfig.kube-bench"
+kind-run-stig: kind-push
+	sed "s/\$${VERSION}/$(VERSION)/" ./hack/kind-stig.yaml > ./hack/kind-stig.test.yaml
+	kind get kubeconfig --name="$(KIND_PROFILE)" > $(KUBECONFIG)
+	-KUBECONFIG=$(KUBECONFIG) \
+	kubectl delete job kube-bench
+	KUBECONFIG=$(KUBECONFIG) \
+	kubectl apply -f ./hack/kind-stig.test.yaml && \
+	kubectl wait --for=condition=complete job.batch/kube-bench --timeout=60s && \
+	kubectl logs job/kube-bench > ./test.data && \
+	diff ./test.data integration/testdata/Expected_output_stig.data
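
Note on the makefile above: build-fips compiles with GOEXPERIMENT=boringcrypto
plus the fipsonly build tag, swapping Go's default crypto for the BoringCrypto
module, and KUBECTL_VERSION/TARGETARCH are now passed through to the docker
builds (presumably consumed by the Dockerfiles to fetch a matching kubectl per
architecture). A sketch for sanity-checking a FIPS build; the grepped symbol
name is an implementation detail of BoringCrypto and may change between Go
releases:

    $ make build-fips
    # A BoringCrypto-linked binary carries goboringcrypto symbols; expect a
    # non-zero count here and zero for a plain `make build`.
    $ go tool nm ./kube-bench | grep -c goboringcrypto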