diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6d455a8eb..3bb078542 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ on: - "LICENSE" - "NOTICE" env: - GO_VERSION: "1.21" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" @@ -32,7 +32,7 @@ jobs: - name: yaml-lint uses: ibiqlik/action-yamllint@v3 - name: Setup golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: version: v1.57.2 args: --verbose @@ -63,7 +63,7 @@ jobs: - name: Checkout code uses: actions/checkout@v4 - name: Setup Kubernetes cluster (KIND) - uses: engineerd/setup-kind@v0.5.0 + uses: engineerd/setup-kind@v0.6.2 with: version: ${{ env.KIND_VERSION }} image: ${{ env.KIND_IMAGE }} @@ -95,7 +95,7 @@ jobs: with: fetch-depth: 0 - name: Dry-run release snapshot - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: v1.7.0 diff --git a/.github/workflows/mkdocs-deploy.yaml b/.github/workflows/mkdocs-deploy.yaml index 91c2954bf..898e7f13d 100644 --- a/.github/workflows/mkdocs-deploy.yaml +++ b/.github/workflows/mkdocs-deploy.yaml @@ -20,7 +20,7 @@ jobs: with: fetch-depth: 0 persist-credentials: true - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: 3.x - run: | diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 681ae0a02..ce8861a79 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,6 +9,7 @@ env: ALIAS: aquasecurity DOCKERHUB_ALIAS: aquasec REP: kube-bench + jobs: publish: name: Publish @@ -46,10 +47,13 @@ jobs: images: ${{ env.REP }} tag-semver: | {{version}} - + - name: Extract variables from makefile (kubectl) + id: extract_vars + run: | + echo "KUBECTL_VERSION=$(grep -oP '^KUBECTL_VERSION\s*\?=\s*\K.*' makefile)" >> $GITHUB_ENV - name: Build and push - Docker/ECR id: docker_build - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -57,6 +61,7 @@ jobs: push: true build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }} @@ -67,7 +72,7 @@ jobs: - name: Build and push ubi image - Docker/ECR id: docker_build_ubi - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -76,6 +81,7 @@ jobs: file: Dockerfile.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi @@ -86,7 +92,7 @@ jobs: - name: Build and push fips ubi image - Docker/ECR id: docker_build_fips_ubi - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
platforms: linux/amd64,linux/arm64,linux/ppc64le,linux/s390x @@ -95,6 +101,7 @@ jobs: file: Dockerfile.fips.ubi build-args: | KUBEBENCH_VERSION=${{ steps.get_version.outputs.version }} + KUBECTL_VERSION=${{ env.KUBECTL_VERSION }} tags: | ${{ env.DOCKERHUB_ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips public.ecr.aws/${{ env.ALIAS }}/${{ env.REP }}:${{ steps.get_version.outputs.version }}-ubi-fips diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 4fc523f6f..2b99258db 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,7 +5,7 @@ on: tags: - "v*" env: - GO_VERSION: "1.21" + GO_VERSION: "1.22.7" KIND_VERSION: "v0.11.1" KIND_IMAGE: "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" @@ -25,7 +25,7 @@ jobs: - name: Run unit tests run: make tests - name: Setup Kubernetes cluster (KIND) - uses: engineerd/setup-kind@v0.5.0 + uses: engineerd/setup-kind@v0.6.2 with: version: ${{ env.KIND_VERSION }} image: ${{ env.KIND_IMAGE }} @@ -44,7 +44,7 @@ jobs: second_file_path: integration/testdata/Expected_output.data expected_result: PASSED - name: Release - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: v1.7.0 diff --git a/Dockerfile b/Dockerfile index fb77e0493..30e33e572 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.2 AS build +FROM golang:1.23.2 AS build WORKDIR /go/src/github.com/aquasecurity/kube-bench/ COPY makefile makefile COPY go.mod go.sum ./ @@ -9,11 +9,20 @@ COPY internal/ internal/ ARG KUBEBENCH_VERSION RUN make build && cp kube-bench /go/bin/kube-bench -FROM alpine:3.20.0 AS run +# Add kubectl to run policies checks +ARG KUBECTL_VERSION TARGETARCH +RUN wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl" +RUN wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256" +# Verify kubectl sha256sum +RUN /bin/bash -c 'echo "$(<kubectl.sha256)  /usr/local/bin/kubectl" | sha256sum -c'
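The download-and-verify step above follows the standard pattern from the Kubernetes install docs: dl.k8s.io publishes the `.sha256` file as a bare hash, so a filename has to be appended before `sha256sum -c` will accept the line. A minimal standalone sketch of the same flow (the version and architecture values here are illustrative stand-ins, not values read from the makefile):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Illustrative values; the real build injects KUBECTL_VERSION from the makefile
# and TARGETARCH from the buildx platform.
KUBECTL_VERSION="1.31.0"
TARGETARCH="amd64"

wget -O kubectl "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl"
wget -O kubectl.sha256 "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl.sha256"

# The published .sha256 file contains only the hash, while `sha256sum -c`
# expects "<hash>  <filename>", so the filename is appended here.
echo "$(cat kubectl.sha256)  kubectl" | sha256sum -c
```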
diff --git a/cfg/cis-1.9/etcd.yaml b/cfg/cis-1.9/etcd.yaml new file mode 100644 --- /dev/null +++ b/cfg/cis-1.9/etcd.yaml + --key-file= + scored: true + + - id: 2.2 + text: "Ensure that the --client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--client-cert-auth" + env: "ETCD_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --client-cert-auth="true" + scored: true + + - id: 2.3 + text: "Ensure that the --auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + set: false + - flag: "--auto-tls" + env: "ETCD_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --auto-tls parameter or set it to false. + --auto-tls=false + scored: true + + - id: 2.4 + text: "Ensure that the --peer-cert-file and --peer-key-file arguments are + set as appropriate (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--peer-cert-file" + env: "ETCD_PEER_CERT_FILE" + - flag: "--peer-key-file" + env: "ETCD_PEER_KEY_FILE" + remediation: | + Follow the etcd service documentation and configure peer TLS encryption as appropriate + for your etcd cluster. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameters. + --peer-cert-file= + --peer-key-file= + scored: true + + - id: 2.5 + text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--peer-client-cert-auth" + env: "ETCD_PEER_CLIENT_CERT_AUTH" + compare: + op: eq + value: true + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and set the below parameter. + --peer-client-cert-auth=true + scored: true + + - id: 2.6 + text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + set: false + - flag: "--peer-auto-tls" + env: "ETCD_PEER_AUTO_TLS" + compare: + op: eq + value: false + remediation: | + Edit the etcd pod specification file $etcdconf on the master + node and either remove the --peer-auto-tls parameter or set it to false. + --peer-auto-tls=false + scored: true + + - id: 2.7 + text: "Ensure that a unique Certificate Authority is used for etcd (Manual)" + audit: "/bin/ps -ef | /bin/grep $etcdbin | /bin/grep -v grep" + tests: + test_items: + - flag: "--trusted-ca-file" + env: "ETCD_TRUSTED_CA_FILE" + remediation: | + [Manual test] + Follow the etcd documentation and create a dedicated certificate authority setup for the + etcd service. + Then, edit the etcd pod specification file $etcdconf on the + master node and set the below parameter. + --trusted-ca-file= + scored: false
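One detail worth keeping in mind when reading the etcd checks above: each test_item pairs a command-line flag with its `ETCD_*` environment-variable equivalent, because etcd accepts either form. A quick way to audit both by hand (a sketch assuming a local etcd process and root access to /proc):

```bash
# Flags appear directly in the process command line:
ps -ef | grep etcd | grep -v grep
# The same options may be set through ETCD_* environment variables instead,
# which `ps` does not show; /proc exposes them (root required):
cat /proc/"$(pgrep -o etcd)"/environ | tr '\0' '\n' | grep '^ETCD_'
```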
diff --git a/cfg/cis-1.9/master.yaml b/cfg/cis-1.9/master.yaml new file mode 100644 index 000000000..51b9ab5d1 --- /dev/null +++ b/cfg/cis-1.9/master.yaml @@ -0,0 +1,919 @@ +--- +controls: +version: "cis-1.9" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the + control plane node. + For example, chmod 600 $apiserverconf + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $apiserverconf + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $controllermanagerconf + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $controllermanagerconf + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 $schedulerconf + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root $schedulerconf + scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $etcdconf + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $etcdconf + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node.
+ For example, chmod 600 + scored: false + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)" + audit: | + ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G + find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root + scored: false + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c permissions=%a "$DATA_DIR" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: | + DATA_DIR='' + for d in $(ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%'); do + if test -d "$d"; then DATA_DIR="$d"; fi + done + if ! test -d "$DATA_DIR"; then DATA_DIR=$etcddatadir; fi + stat -c %U:%G "$DATA_DIR" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). + For example, chown etcd:etcd /var/lib/etcd + scored: true + + - id: 1.1.13 + text: "Ensure that the default administrative credential file permissions are set to 600 (Automated)" + audit: | + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "permissions=%a %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. + For example, chmod 600 /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.14 + text: "Ensure that the default administrative credential file ownership is set to root:root (Automated)" + audit: | + for adminconf in /etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf; do if test -e $adminconf; then stat -c "ownership=%U:%G %n" $adminconf; fi; done + use_multiple_values: true + tests: + test_items: + - flag: "ownership" + compare: + op: eq + value: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /etc/kubernetes/admin.conf + On Kubernetes 1.29+ the super-admin.conf file should also be modified, if present. 
+ For example, chown root:root /etc/kubernetes/super-admin.conf + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c %U:%G $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "find /etc/kubernetes/pki/ | xargs stat -c %U:%G" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown -R root:root /etc/kubernetes/pki/ + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.crt' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod -R 600 /etc/kubernetes/pki/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)" + audit: "find /etc/kubernetes/pki/ -name '*.key' | xargs stat -c permissions=%a" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chmod -R 600 /etc/kubernetes/pki/*.key + scored: false + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --anonymous-auth=false + scored: false + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. Then, + edit the API server pod specification file $apiserverconf + on the control plane node and remove the --token-auth-file= parameter. + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "DenyServiceExternalIPs" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and add the `DenyServiceExternalIPs` plugin + to the enabled admission plugins, as such --enable-admission-plugins=DenyServiceExternalIPs. + scored: false + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the + apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + kubelet client certificate and key parameters as below. + --kubelet-client-certificate= + --kubelet-client-key= + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between + the apiserver and kubelets. Then, edit the API server pod specification file + $apiserverconf on the control plane node and set the + --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority. + --kubelet-certificate-authority= + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. + One such example could be as below.
+ --authorization-mode=RBAC + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes Node. + --authorization-mode=Node,RBAC + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, + for example `--authorization-mode=Node,RBAC`. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the API server pod specification file $apiserverconf + and set the below parameters. + --enable-admission-plugins=...,EventRateLimit,... + --admission-control-config-file= + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a + value that does not include AlwaysAdmit. + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to include + AlwaysPullImages. + --enable-admission-plugins=...,AlwaysPullImages,... + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Follow the documentation and create ServiceAccount objects as per your environment. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and ensure that the --disable-admission-plugins parameter is set to a + value that does not include ServiceAccount. 
+ scored: true + + - id: 1.2.13 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --disable-admission-plugins parameter to + ensure it does not include NamespaceLifecycle. + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --enable-admission-plugins parameter to a + value that includes NodeRestriction. + --enable-admission-plugins=...,NodeRestriction,... + scored: true + + - id: 1.2.15 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.2.16 + text: "Ensure that the --audit-log-path argument is set (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + --audit-log-path=/var/log/apiserver/audit.log + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxage parameter to 30 + or as an appropriate number of days, for example, + --audit-log-maxage=30 + scored: true + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate + value. 
For example, + --audit-log-maxbackup=10 + scored: true + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. + For example, to set it as 100 MB, --audit-log-maxsize=100 + scored: true + + - id: 1.2.20 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + type: manual + remediation: | + Edit the API server pod specification file $apiserverconf + and set the below parameter as appropriate and if needed. + For example, --request-timeout=300s + scored: false + + - id: 1.2.21 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the below parameter. + --service-account-lookup=true + Alternatively, you can delete the --service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.22 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + Edit the API server pod specification file $apiserverconf + on the control plane node and set the --service-account-key-file parameter + to the public key file for service accounts. For example, + --service-account-key-file= + scored: true + + - id: 1.2.23 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + - flag: "--etcd-keyfile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate and key file parameters. + --etcd-certfile= + --etcd-keyfile= + scored: true + + - id: 1.2.24 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + - flag: "--tls-private-key-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the TLS certificate and private key file parameters. 
+ --tls-cert-file= + --tls-private-key-file= + scored: true + + - id: 1.2.25 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--client-ca-file" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection on the apiserver. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the client certificate authority file. + --client-ca-file= + scored: true + + - id: 1.2.26 + text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--etcd-cafile" + remediation: | + Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the etcd certificate authority file parameter. + --etcd-cafile= + scored: true + + - id: 1.2.27 + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--encryption-provider-config" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + Then, edit the API server pod specification file $apiserverconf + on the control plane node and set the --encryption-provider-config parameter to the path of that file. + For example, --encryption-provider-config= + scored: false + + - id: 1.2.28 + text: "Ensure that encryption providers are appropriately configured (Manual)" + audit: | + ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + tests: + test_items: + - flag: "provider" + compare: + op: valid_elements + value: "aescbc,kms,secretbox" + remediation: | + Follow the Kubernetes documentation and configure an EncryptionConfig file. + In this file, choose aescbc, kms or secretbox as the encryption provider. + scored: false + + - id: 1.2.29 + text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + test_items: + - flag: "--tls-cipher-suites" + compare: + op: valid_elements + value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml + on the control plane node and set the below parameter.
+ --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, + TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 + scored: false + + - id: 1.3 + text: "Controller Manager" + checks: + - id: 1.3.1 + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--terminated-pod-gc-threshold" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, + for example, --terminated-pod-gc-threshold=10 + scored: false + + - id: 1.3.2 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.3.3 + text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--use-service-account-credentials" + compare: + op: noteq + value: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node to set the below parameter. + --use-service-account-credentials=true + scored: true + + - id: 1.3.4 + text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--service-account-private-key-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --service-account-private-key-file parameter + to the private key file for service accounts. + --service-account-private-key-file= + scored: true + + - id: 1.3.5 + text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + test_items: + - flag: "--root-ca-file" + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --root-ca-file parameter to the certificate bundle file. + --root-ca-file= + scored: true + + - id: 1.3.6 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--feature-gates" + compare: + op: nothave + value: "RotateKubeletServerCertificate=false" + set: true + - flag: "--feature-gates" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true. + --feature-gates=RotateKubeletServerCertificate=true + scored: true + + - id: 1.3.7 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Controller Manager pod specification file $controllermanagerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true + + - id: 1.4 + text: "Scheduler" + checks: + - id: 1.4.1 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and set the below parameter. + --profiling=false + scored: true + + - id: 1.4.2 + text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" + audit: "/bin/ps -ef | grep $schedulerbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--bind-address" + compare: + op: eq + value: "127.0.0.1" + - flag: "--bind-address" + set: false + remediation: | + Edit the Scheduler pod specification file $schedulerconf + on the control plane node and ensure the correct value for the --bind-address parameter. + scored: true
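The file-permission checks in these profiles (1.1.x above, 4.1.x below) feed `stat -c permissions=%a` output through a bitmask comparison: a file passes `op: bitmask` with value "600" only if it sets no permission bits outside rw-------, so 600 and 400 pass while 644 and 700 fail. A rough shell rendering of that semantic, offered as an external sketch of the idea rather than kube-bench's actual Go implementation:

```bash
#!/usr/bin/env bash
# Pass condition for "600 or more restrictive": the mode must set no bits
# outside rw------- (0600).
file="/etc/kubernetes/admin.conf"   # any file a 1.1.x/4.1.x check targets
mode=$(stat -c %a "$file")
if [ $(( 0$mode & ~0600 & 0777 )) -eq 0 ]; then
  echo "permissions=$mode PASS"
else
  echo "permissions=$mode FAIL"
fi
```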
diff --git a/cfg/cis-1.9/node.yaml b/cfg/cis-1.9/node.yaml new file mode 100644 index 000000000..d4fa57a60 --- /dev/null +++ b/cfg/cis-1.9/node.yaml @@ -0,0 +1,478 @@ +--- +controls: +version: "cis-1.9" +id: 4 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 4.1 + text: "Worker Node Configuration Files" + checks: + - id: 4.1.1 + text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chmod 600 $kubeletsvc + scored: true + + - id: 4.1.2 + text: "Ensure that the kubelet service file ownership is set to root:root (Automated)" + audit: '/bin/sh -c "if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; else echo \"File not found\"; fi"' + tests: + bin_op: or + test_items: + - flag: root:root + - flag: "File not found" + remediation: | + Run the below command (based on the file location on your system) on each worker node.
+ For example, + chown root:root $kubeletsvc + scored: true + + - id: 4.1.3 + text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: "permissions" + set: true + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $proxykubeconfig + scored: false + + - id: 4.1.4 + text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + bin_op: or + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, chown root:root $proxykubeconfig + scored: false + + - id: 4.1.5 + text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chmod 600 $kubeletkubeconfig + scored: true + + - id: 4.1.6 + text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c %U:%G $kubeletkubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + chown root:root $kubeletkubeconfig + scored: true + + - id: 4.1.7 + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c permissions=%a $CAFILE; fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command to modify the file permissions of the + --client-ca-file chmod 600 + scored: false + + - id: 4.1.8 + text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" + audit: | + CAFILE=$(ps -ef | grep kubelet | grep -v apiserver | grep -- --client-ca-file= | awk -F '--client-ca-file=' '{print $2}' | awk '{print $1}' | uniq) + if test -z $CAFILE; then CAFILE=$kubeletcafile; fi + if test -e $CAFILE; then stat -c %U:%G $CAFILE; fi + tests: + test_items: + - flag: root:root + compare: + op: eq + value: root:root + remediation: | + Run the following command to modify the ownership of the --client-ca-file.
+ chown root:root + scored: false + + - id: 4.1.9 + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the config file location identified in the Audit step) + chmod 600 $kubeletconf + scored: true + + - id: 4.1.10 + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" + audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + chown root:root $kubeletconf + scored: true + + - id: 4.2 + text: "Kubelet" + checks: + - id: 4.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to + `false`. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + `--anonymous-auth=false` + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If + using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --authorization-mode=Webhook + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.3 + text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + remediation: | + If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to + the location of the client CA file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_AUTHZ_ARGS variable. + --client-ca-file= + Based on your system, restart the kubelet service. 
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.4 + text: "Verify that the --read-only-port argument is set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + remediation: | + If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --read-only-port=0 + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a + value other than 0. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + --streaming-connection-idle-timeout=5m + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove the --make-iptables-util-chains argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.7 + text: "Ensure that the --hostname-override argument is not set (Manual)" + # This is one of those properties that can only be set as a command line argument. + # To check if the property is set as expected, we need to parse the kubelet command + # instead of reading the Kubelet Configuration file. + audit: "/bin/ps -fC $kubeletbin" + tests: + test_items: + - flag: --hostname-override + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and remove the --hostname-override argument from the + KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service.
For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.8 + text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + compare: + op: gte + value: 0 + - flag: --event-qps + path: '{.eventRecordQPS}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.9 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cert-file + path: '{.tlsCertFile}' + - flag: --tls-private-key-file + path: '{.tlsPrivateKeyFile}' + remediation: | + If using a Kubelet config file, edit the file to set `tlsCertFile` to the location + of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` + to the location of the corresponding private key file. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the below parameters in KUBELET_CERTIFICATE_ARGS variable. + --tls-cert-file= + --tls-private-key-file= + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.10 + text: "Ensure that the --rotate-certificates argument is not set to false (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + If using a Kubelet config file, edit the file to set `rotateCertificates` to `true` or + remove it altogether to use the default value. + If using command line arguments, edit the kubelet service file + $kubeletsvc on each worker node and + remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS + variable. + Based on your system, restart the kubelet service. For example, + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 4.2.11 + text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + bin_op: or + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: nothave + value: false + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + set: false + remediation: | + Edit the kubelet service file $kubeletsvc + on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable. + --feature-gates=RotateKubeletServerCertificate=true + Based on your system, restart the kubelet service.
For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.12 + text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --tls-cipher-suites + path: '{range .tlsCipherSuites[:]}{}{'',''}{end}' + compare: + op: valid_elements + value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + remediation: | + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to + TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + or to a subset of these values. + If using executable arguments, edit the kubelet service file + $kubeletsvc on each worker node and + set the --tls-cipher-suites parameter as follows, or to a subset of these values. + --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 + Based on your system, restart the kubelet service. For example: + systemctl daemon-reload + systemctl restart kubelet.service + scored: false + + - id: 4.2.13 + text: "Ensure that a limit is set on pod PIDs (Manual)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat $kubeletconf" + tests: + test_items: + - flag: --pod-max-pids + path: '{.podPidsLimit}' + remediation: | + Decide on an appropriate level for this parameter and set it, + either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting. + scored: false + + - id: 4.3 + text: "kube-proxy" + checks: + - id: 4.3.1 + text: "Ensure that the kube-proxy metrics service is bound to localhost (Automated)" + audit: "/bin/ps -fC $proxybin" + audit_config: "/bin/sh -c 'if test -e $proxykubeconfig; then cat $proxykubeconfig; fi'" + tests: + bin_op: or + test_items: + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + compare: + op: has + value: "127.0.0.1" + - flag: "--metrics-bind-address" + path: '{.metricsBindAddress}' + set: false + remediation: | + Modify or remove any values which bind the metrics service to a non-localhost address. + The default value is 127.0.0.1:10249. + scored: true
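Check 4.3.1 above accepts either the --metrics-bind-address flag on the kube-proxy process or the metricsBindAddress field in its configuration. On kubeadm-provisioned clusters that configuration typically lives in a ConfigMap rather than a local file, so a manual spot check might look like this (a sketch; the ConfigMap name and namespace are the kubeadm defaults):

```bash
# Flag form: present only if kube-proxy was started with it.
ps -ef | grep kube-proxy | grep -v grep | grep -o -- '--metrics-bind-address=[^ ]*'
# Config form: kubeadm stores the kube-proxy config in a ConfigMap.
kubectl -n kube-system get configmap kube-proxy -o yaml | grep metricsBindAddress
# Either way, anything other than 127.0.0.1 (or unset, which defaults to
# 127.0.0.1:10249) exposes the metrics endpoint beyond localhost.
```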
+ scored: true diff --git a/cfg/cis-1.9/policies.yaml b/cfg/cis-1.9/policies.yaml new file mode 100644 index 000000000..770d2cb28 --- /dev/null +++ b/cfg/cis-1.9/policies.yaml @@ -0,0 +1,405 @@ +--- +controls: +version: "cis-1.9" +id: 5 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 5.1 + text: "RBAC and Service Accounts" + checks: + - id: 5.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + audit: | + kubectl get clusterrolebindings -o=custom-columns=NAME:.metadata.name,ROLE:.roleRef.name,SUBJECT:.subjects[*].name --no-headers | while read -r role_name role_binding subject + do + if [[ "${role_name}" != "cluster-admin" && "${role_binding}" == "cluster-admin" ]]; then + is_compliant="false" + else + is_compliant="true" + fi; + echo "**role_name: ${role_name} role_binding: ${role_binding} subject: ${subject} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role: kubectl delete clusterrolebinding [name] + Condition: is_compliant is false if role_name is not cluster-admin and role_binding is cluster-admin. + scored: true + + - id: 5.1.2 + text: "Minimize access to secrets (Automated)" + audit: "echo \"canGetListWatchSecretsAsSystemAuthenticated: $(kubectl auth can-i get,list,watch secrets --all-namespaces --as=system:authenticated)\"" + tests: + test_items: + - flag: "canGetListWatchSecretsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove get, list and watch access to Secret objects in the cluster. + scored: true + + - id: 5.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + audit: | + # Check Roles + kubectl get roles --all-namespaces -o custom-columns=ROLE_NAMESPACE:.metadata.namespace,ROLE_NAME:.metadata.name --no-headers | while read -r role_namespace role_name + do + role_rules=$(kubectl get role -n "${role_namespace}" "${role_name}" -o=json | jq -c '.rules') + if echo "${role_rules}" | grep -q "\[\"\*\"\]"; then + role_is_compliant="false" + else + role_is_compliant="true" + fi; + echo "**role_name: ${role_name} role_namespace: ${role_namespace} role_rules: ${role_rules} role_is_compliant: ${role_is_compliant}" + done + + # Check ClusterRoles + kubectl get clusterroles -o custom-columns=CLUSTERROLE_NAME:.metadata.name --no-headers | while read -r clusterrole_name + do + clusterrole_rules=$(kubectl get clusterrole "${clusterrole_name}" -o=json | jq -c '.rules') + if echo "${clusterrole_rules}" | grep -q "\[\"\*\"\]"; then + clusterrole_is_compliant="false" + else + clusterrole_is_compliant="true" + fi; + echo "**clusterrole_name: ${clusterrole_name} clusterrole_rules: ${clusterrole_rules} clusterrole_is_compliant: ${clusterrole_is_compliant}" + done + use_multiple_values: true + tests: + bin_op: or + test_items: + - flag: "role_is_compliant" + compare: + op: eq + value: true + set: true + - flag: "clusterrole_is_compliant" + compare: + op: eq + value: true + set: true + remediation: | + Where possible replace any use of wildcards ["*"] in roles and clusterroles with specific + objects or actions. + Condition: role_is_compliant is false if ["*"] is found in rules.
+ Condition: clusterrole_is_compliant is false if ["*"] is found in rules. + scored: true + + - id: 5.1.4 + text: "Minimize access to create pods (Automated)" + audit: | + echo "canCreatePodsAsSystemAuthenticated: $(kubectl auth can-i create pods --all-namespaces --as=system:authenticated)" + tests: + test_items: + - flag: "canCreatePodsAsSystemAuthenticated" + compare: + op: eq + value: no + remediation: | + Where possible, remove create access to pod objects in the cluster. + scored: true + - id: 5.1.5 + text: "Ensure that default service accounts are not actively used (Automated)" + audit: | + kubectl get serviceaccount --all-namespaces --field-selector metadata.name=default -o=json | jq -r '.items[] | " namespace: \(.metadata.namespace), kind: \(.kind), name: \(.metadata.name), automountServiceAccountToken: \(.automountServiceAccountToken | if . == null then "notset" else . end )"' | xargs -L 1 + use_multiple_values: true + tests: + test_items: + - flag: "automountServiceAccountToken" + compare: + op: eq + value: false + set: true + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific access + to the Kubernetes API server. + Modify the configuration of each default service account to include this value + `automountServiceAccountToken: false`. + scored: true + + - id: 5.1.6 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + audit: | + kubectl get pods --all-namespaces -o custom-columns=POD_NAMESPACE:.metadata.namespace,POD_NAME:.metadata.name,POD_SERVICE_ACCOUNT:.spec.serviceAccount,POD_IS_AUTOMOUNTSERVICEACCOUNTTOKEN:.spec.automountServiceAccountToken --no-headers | while read -r pod_namespace pod_name pod_service_account pod_is_automountserviceaccounttoken + do + # Retrieve automountServiceAccountToken's value for ServiceAccount and Pod, set to notset if null or <none>. + svacc_is_automountserviceaccounttoken=$(kubectl get serviceaccount -n "${pod_namespace}" "${pod_service_account}" -o json | jq -r '.automountServiceAccountToken' | sed -e 's/<none>/notset/g' -e 's/null/notset/g') + pod_is_automountserviceaccounttoken=$(echo "${pod_is_automountserviceaccounttoken}" | sed -e 's/<none>/notset/g' -e 's/null/notset/g') + if [ "${svacc_is_automountserviceaccounttoken}" = "false" ] && ( [ "${pod_is_automountserviceaccounttoken}" = "false" ] || [ "${pod_is_automountserviceaccounttoken}" = "notset" ] ); then + is_compliant="true" + elif [ "${svacc_is_automountserviceaccounttoken}" = "true" ] && [ "${pod_is_automountserviceaccounttoken}" = "false" ]; then + is_compliant="true" + else + is_compliant="false" + fi + echo "**namespace: ${pod_namespace} pod_name: ${pod_name} service_account: ${pod_service_account} pod_is_automountserviceaccounttoken: ${pod_is_automountserviceaccounttoken} svacc_is_automountServiceAccountToken: ${svacc_is_automountserviceaccounttoken} is_compliant: ${is_compliant}" + done + use_multiple_values: true + tests: + test_items: + - flag: "is_compliant" + compare: + op: eq + value: true + remediation: | + Modify the definition of ServiceAccounts and Pods which do not need to mount service + account tokens to disable it, with `automountServiceAccountToken: false`. + If both the ServiceAccount and the Pod's .spec specify a value for automountServiceAccountToken, the Pod spec takes precedence.
+ Condition: Pod is_compliant to true when + - ServiceAccount is automountServiceAccountToken: false and Pod is automountServiceAccountToken: false or notset + - ServiceAccount is automountServiceAccountToken: true and Pod is automountServiceAccountToken: false + scored: true + + - id: 5.1.7 + text: "Avoid use of system:masters group (Manual)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 5.1.8 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 5.1.9 + text: "Minimize access to create persistent volumes (Manual)" + type: "manual" + remediation: | + Where possible, remove create access to PersistentVolume objects in the cluster. + scored: false + + - id: 5.1.10 + text: "Minimize access to the proxy sub-resource of nodes (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the proxy sub-resource of node objects. + scored: false + + - id: 5.1.11 + text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the approval sub-resource of certificatesigningrequest objects. + scored: false + + - id: 5.1.12 + text: "Minimize access to webhook configuration objects (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects + scored: false + + - id: 5.1.13 + text: "Minimize access to the service account token creation (Manual)" + type: "manual" + remediation: | + Where possible, remove access to the token sub-resource of serviceaccount objects. + scored: false + + - id: 5.2 + text: "Pod Security Standards" + checks: + - id: 5.2.1 + text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)" + type: "manual" + remediation: | + Ensure that either Pod Security Admission or an external policy control system is in place + for every namespace which contains user workloads. + scored: false + + - id: 5.2.2 + text: "Minimize the admission of privileged containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of privileged containers. + scored: false + + - id: 5.2.3 + text: "Minimize the admission of containers wishing to share the host process ID namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostPID` containers. + scored: false + + - id: 5.2.4 + text: "Minimize the admission of containers wishing to share the host IPC namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostIPC` containers. + scored: false + + - id: 5.2.5 + text: "Minimize the admission of containers wishing to share the host network namespace (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of `hostNetwork` containers.
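+        # Illustrative sketch (not part of the CIS text): Pod Security Admission
+        # labels that enforce the Baseline profile on a namespace, which rejects
+        # privileged, hostPID, hostIPC and hostNetwork pods. The namespace name
+        # "apps" is hypothetical.
+        #   apiVersion: v1
+        #   kind: Namespace
+        #   metadata:
+        #     name: apps
+        #     labels:
+        #       pod-security.kubernetes.io/enforce: baseline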
+ scored: false + + - id: 5.2.6 + text: "Minimize the admission of containers with allowPrivilegeEscalation (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `.spec.allowPrivilegeEscalation` set to `true`. + scored: false + + - id: 5.2.7 + text: "Minimize the admission of root containers (Manual)" + type: "manual" + remediation: | + Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot` + or `MustRunAs` with the range of UIDs not including 0, is set. + scored: false + + - id: 5.2.8 + text: "Minimize the admission of containers with the NET_RAW capability (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with the `NET_RAW` capability. + scored: false + + - id: 5.2.9 + text: "Minimize the admission of containers with added capabilities (Manual)" + type: "manual" + remediation: | + Ensure that `allowedCapabilities` is not present in policies for the cluster unless + it is set to an empty array. + scored: false + + - id: 5.2.10 + text: "Minimize the admission of containers with capabilities assigned (Manual)" + type: "manual" + remediation: | + Review the use of capabilities in applications running on your cluster. Where a namespace + contains applications which do not require any Linux capabilities to operate, consider adding + a policy which forbids the admission of containers which do not drop all capabilities. + scored: false + + - id: 5.2.11 + text: "Minimize the admission of Windows HostProcess containers (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`. + scored: false + + - id: 5.2.12 + text: "Minimize the admission of HostPath volumes (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers with `hostPath` volumes. + scored: false + + - id: 5.2.13 + text: "Minimize the admission of containers which use HostPorts (Manual)" + type: "manual" + remediation: | + Add policies to each namespace in the cluster which has user workloads to restrict the + admission of containers which use `hostPort` sections. + scored: false + + - id: 5.3 + text: "Network Policies and CNI" + checks: + - id: 5.3.1 + text: "Ensure that the CNI in use supports NetworkPolicies (Manual)" + type: "manual" + remediation: | + If the CNI plugin in use does not support network policies, consideration should be given to + making use of a different plugin, or finding an alternate mechanism for restricting traffic + in the Kubernetes cluster. + scored: false + + - id: 5.3.2 + text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as you need them. + scored: false + + - id: 5.4 + text: "Secrets Management" + checks: + - id: 5.4.1 + text: "Prefer using Secrets as files over Secrets as environment variables (Manual)" + type: "manual" + remediation: | + If possible, rewrite application code to read Secrets from mounted secret files, rather than + from environment variables.
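+        # Illustrative sketch (not part of the CIS text): a Pod that consumes a
+        # Secret as a read-only mounted file instead of environment variables.
+        # All names and the image are hypothetical.
+        #   apiVersion: v1
+        #   kind: Pod
+        #   metadata:
+        #     name: secret-file-demo
+        #   spec:
+        #     containers:
+        #       - name: app
+        #         image: registry.example.com/app:1.0
+        #         volumeMounts:
+        #           - name: creds
+        #             mountPath: /etc/creds
+        #             readOnly: true
+        #     volumes:
+        #       - name: creds
+        #         secret:
+        #           secretName: app-credentials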
+ scored: false + + - id: 5.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the Secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 5.5 + text: "Extensible Admission Control" + checks: + - id: 5.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and setup image provenance. + scored: false + + - id: 5.7 + text: "General Policies" + checks: + - id: 5.7.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 5.7.2 + text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)" + type: "manual" + remediation: | + Use `securityContext` to enable the docker/default seccomp profile in your pod definitions. + An example is as below: + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 5.7.3 + text: "Apply SecurityContext to your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a + suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker + Containers. + scored: false + + - id: 5.7.4 + text: "The default namespace should not be used (Manual)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace. 
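+        # Illustrative sketch (not part of the CIS text): create a dedicated
+        # namespace and make it the default for the current kubectl context.
+        # The namespace name "payments" is hypothetical.
+        #   kubectl create namespace payments
+        #   kubectl config set-context --current --namespace=payments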
+ scored: false diff --git a/cfg/config.yaml b/cfg/config.yaml index b26115f37..d5d170bbd 100644 --- a/cfg/config.yaml +++ b/cfg/config.yaml @@ -164,13 +164,13 @@ node: - "/var/snap/microk8s/current/credentials/kubelet.config" - "/etc/kubernetes/kubeconfig-kubelet" - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" - - "/var/lib/rancher/k3s/server/cred/admin.kubeconfig" - "/var/lib/rancher/k3s/agent/kubelet.kubeconfig" confs: - "/etc/kubernetes/kubelet-config.yaml" - "/var/lib/kubelet/config.yaml" - "/var/lib/kubelet/config.yml" - "/etc/kubernetes/kubelet/kubelet-config.json" + - "/etc/kubernetes/kubelet/config.json" - "/etc/kubernetes/kubelet/config" - "/home/kubernetes/kubelet-config.yaml" - "/home/kubernetes/kubelet-config.yml" @@ -190,7 +190,6 @@ node: - "/etc/systemd/system/snap.kubelet.daemon.service" - "/etc/systemd/system/snap.microk8s.daemon-kubelet.service" - "/etc/kubernetes/kubelet.yaml" - - "/var/lib/rancher/rke2/agent/kubelet.kubeconfig" defaultconf: "/var/lib/kubelet/config.yaml" defaultsvc: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf" @@ -281,11 +280,15 @@ version_mapping: "1.24": "cis-1.24" "1.25": "cis-1.7" "1.26": "cis-1.8" + "1.27": "cis-1.9" + "1.28": "cis-1.9" + "1.29": "cis-1.9" "eks-1.0.1": "eks-1.0.1" "eks-1.1.0": "eks-1.1.0" "eks-1.2.0": "eks-1.2.0" "gke-1.0": "gke-1.0" "gke-1.2.0": "gke-1.2.0" + "gke-1.6.0": "gke-1.6.0" "ocp-3.10": "rh-0.7" "ocp-3.11": "rh-0.7" "ocp-4.0": "rh-1.0" @@ -359,6 +362,12 @@ target_mapping: - "controlplane" - "etcd" - "policies" + "cis-1.9": + - "master" + - "node" + - "controlplane" + - "etcd" + - "policies" "gke-1.0": - "master" - "node" @@ -372,6 +381,12 @@ target_mapping: - "controlplane" - "policies" - "managedservices" + "gke-1.6.0": + - "master" + - "node" + - "controlplane" + - "policies" + - "managedservices" "eks-1.0.1": - "master" - "node" @@ -429,6 +444,12 @@ target_mapping: - "controlplane" - "node" - "policies" + "k3s-cis-1.8": + - "master" + - "etcd" + - "controlplane" + - "node" + - "policies" "k3s-cis-1.23": - "master" - "etcd" diff --git a/cfg/gke-1.6.0/config.yaml b/cfg/gke-1.6.0/config.yaml new file mode 100644 index 000000000..e15e43f48 --- /dev/null +++ b/cfg/gke-1.6.0/config.yaml @@ -0,0 +1,9 @@ +--- +## Version-specific settings that override the values in cfg/config.yaml + +node: + proxy: + defaultkubeconfig: "/var/lib/kubelet/kubeconfig" + + kubelet: + defaultconf: "/etc/kubernetes/kubelet/kubelet-config.yaml" diff --git a/cfg/gke-1.6.0/controlplane.yaml b/cfg/gke-1.6.0/controlplane.yaml new file mode 100644 index 000000000..011d86075 --- /dev/null +++ b/cfg/gke-1.6.0/controlplane.yaml @@ -0,0 +1,20 @@ +--- +controls: +version: "gke-1.6.0" +id: 2 +text: "Control Plane Configuration" +type: "controlplane" +groups: + - id: 2.1 + text: "Authentication and Authorization" + checks: + - id: 2.1.1 + text: "Client certificate authentication should not be used for users (Manual)" + type: "manual" + remediation: | + Alternative mechanisms provided by Kubernetes such as the use of OIDC should be + implemented in place of client certificates. + + You can remediate the availability of client certificates in your GKE cluster. See + Recommendation 5.8.1. 
+ scored: false diff --git a/cfg/gke-1.6.0/managedservices.yaml b/cfg/gke-1.6.0/managedservices.yaml new file mode 100644 index 000000000..e82320d67 --- /dev/null +++ b/cfg/gke-1.6.0/managedservices.yaml @@ -0,0 +1,617 @@ +--- +controls: +version: "gke-1.6.0" +id: 5 +text: "Managed Services" +type: "managedservices" +groups: + - id: 5.1 + text: "Image Registry and Image Scanning" + checks: + - id: 5.1.1 + text: "Ensure Image Vulnerability Scanning is enabled (Automated)" + type: "manual" + remediation: | + For Images Hosted in GCR: + Using Command Line: + + gcloud services enable containeranalysis.googleapis.com + + For Images Hosted in AR: + Using Command Line: + + gcloud services enable containerscanning.googleapis.com + scored: false + + - id: 5.1.2 + text: "Minimize user access to Container Image repositories (Manual)" + type: "manual" + remediation: | + For Images Hosted in AR: + Using Command Line: + + gcloud artifacts repositories set-iam-policy <REPOSITORY_NAME> <PATH_TO_POLICY_FILE> \ + --location <REPOSITORY_LOCATION> + + To learn how to configure policy files see: https://cloud.google.com/artifact-registry/docs/access-control#grant + + For Images Hosted in GCR: + Using Command Line: + To change roles at the GCR bucket level: + Firstly, run the following if read permissions are required: + + gsutil iam ch <TYPE>:<EMAIL_ADDRESS>:objectViewer gs://artifacts.<PROJECT_ID>.appspot.com + + Then remove the excessively privileged role (Storage Admin / Storage Object + Admin / Storage Object Creator) using: + + gsutil iam ch -d <TYPE>:<EMAIL_ADDRESS>:<ROLE> gs://artifacts.<PROJECT_ID>.appspot.com + + where: + <TYPE> can be one of the following: + user, if the <EMAIL_ADDRESS> is a Google account. + serviceAccount, if <EMAIL_ADDRESS> specifies a Service account. + <EMAIL_ADDRESS> can be one of the following: + a Google account (for example, someone@example.com). + a Cloud IAM service account. + + To modify roles defined at the project level and subsequently inherited within the GCR + bucket, or the Service Account User role, extract the IAM policy file, modify it + accordingly and apply it using: + + gcloud projects set-iam-policy <PROJECT_ID> <PATH_TO_POLICY_FILE> + scored: false + + - id: 5.1.3 + text: "Minimize cluster access to read-only for Container Image repositories (Manual)" + type: "manual" + remediation: | + For Images Hosted in AR: + Using Command Line: + Add artifactregistry.reader role + + gcloud artifacts repositories add-iam-policy-binding <REPOSITORY_NAME> \ + --location=<REPOSITORY_LOCATION> \ + --member='serviceAccount:<EMAIL_ADDRESS>' \ + --role='roles/artifactregistry.reader' + + Remove any roles other than artifactregistry.reader + + gcloud artifacts repositories remove-iam-policy-binding <REPOSITORY_NAME> \ + --location <REPOSITORY_LOCATION> \ + --member='serviceAccount:<EMAIL_ADDRESS>' \ + --role='<ROLE_NAME>' + + For Images Hosted in GCR: + For an account explicitly granted to the bucket: + Firstly add read access to the Kubernetes Service Account: + + gsutil iam ch <TYPE>:<EMAIL_ADDRESS>:objectViewer gs://artifacts.<PROJECT_ID>.appspot.com + + where: + <TYPE> can be one of the following: + user, if the <EMAIL_ADDRESS> is a Google account. + serviceAccount, if <EMAIL_ADDRESS> specifies a Service account. + <EMAIL_ADDRESS> can be one of the following: + a Google account (for example, someone@example.com). + a Cloud IAM service account.
+ + Then remove the excessively privileged role (Storage Admin / Storage Object + Admin / Storage Object Creator) using: + + gsutil iam ch -d <TYPE>:<EMAIL_ADDRESS>:<ROLE> gs://artifacts.<PROJECT_ID>.appspot.com + + For an account that inherits access to the GCR Bucket through Project level + permissions, modify the Projects IAM policy file accordingly, then upload it using: + + gcloud projects set-iam-policy <PROJECT_ID> <PATH_TO_POLICY_FILE> + scored: false + + - id: 5.1.4 + text: "Ensure only trusted container images are used (Manual)" + type: "manual" + remediation: | + Using Command Line: + Update the cluster to enable Binary Authorization: + + gcloud container clusters update <CLUSTER_NAME> --enable-binauthz + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: + https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance. + + Import the policy file into Binary Authorization: + + gcloud container binauthz policy import <YAML_POLICY_FILE> + scored: false + + - id: 5.2 + text: "Identity and Access Management (IAM)" + checks: + - id: 5.2.1 + text: "Ensure GKE clusters are not running using the Compute Engine default service account (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a minimally privileged service account: + + gcloud iam service-accounts create <SA_NAME> \ + --display-name "GKE Node Service Account" + export NODE_SA_EMAIL=$(gcloud iam service-accounts list \ + --format='value(email)' --filter='displayName:GKE Node Service Account') + + Grant the following roles to the service account: + + export PROJECT_ID=$(gcloud config get-value project) + gcloud projects add-iam-policy-binding <PROJECT_ID> --member \ + serviceAccount:<NODE_SA_EMAIL> --role roles/monitoring.metricWriter + gcloud projects add-iam-policy-binding <PROJECT_ID> --member \ + serviceAccount:<NODE_SA_EMAIL> --role roles/monitoring.viewer + gcloud projects add-iam-policy-binding <PROJECT_ID> --member \ + serviceAccount:<NODE_SA_EMAIL> --role roles/logging.logWriter + + To create a new Node pool using the Service account, run the following command: + + gcloud container node-pools create <POOL_NAME> \ + --service-account=<SA_NAME>@<PROJECT_ID>.iam.gserviceaccount.com \ + --cluster=<CLUSTER_NAME> --zone <COMPUTE_ZONE> + + Note: The workloads will need to be migrated to the new Node pool, and the old node + pools that use the default service account should be deleted to complete the + remediation. + scored: false + + - id: 5.2.2 + text: "Prefer using dedicated GCP Service Accounts and Workload Identity (Manual)" + type: "manual" + remediation: | + Using Command Line: + + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --workload-pool <PROJECT_ID>.svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to + --workload-metadata-from-node=GKE_METADATA_SERVER. + + Then, modify existing Node pools to enable GKE_METADATA_SERVER: + + gcloud container node-pools update <NODEPOOL_NAME> --cluster <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> --workload-metadata=GKE_METADATA + + Workloads may need to be modified in order for them to use Workload Identity as + described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity. + Also consider the effects on the availability of hosted workloads as Node pools + are updated. It may be more appropriate to create new Node Pools.
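+        # Illustrative sketch (not part of the CIS text): once Workload Identity
+        # is enabled, a Kubernetes ServiceAccount is linked to a dedicated GCP
+        # service account; all names below are hypothetical.
+        #   gcloud iam service-accounts add-iam-policy-binding \
+        #     app-gsa@my-project.iam.gserviceaccount.com \
+        #     --role roles/iam.workloadIdentityUser \
+        #     --member "serviceAccount:my-project.svc.id.goog[default/app-ksa]"
+        #   kubectl annotate serviceaccount app-ksa --namespace default \
+        #     iam.gke.io/gcp-service-account=app-gsa@my-project.iam.gserviceaccount.com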
+ scored: false + + - id: 5.3 + text: "Cloud Key Management Service (Cloud KMS)" + checks: + - id: 5.3.1 + text: "Ensure Kubernetes Secrets are encrypted using keys managed in Cloud KMS (Automated)" + type: "manual" + remediation: | + To create a key: + Create a key ring: + + gcloud kms keyrings create <RING_NAME> --location <LOCATION> --project \ + <KEY_PROJECT_ID> + + Create a key: + + gcloud kms keys create <KEY_NAME> --location <LOCATION> --keyring <RING_NAME> \ + --purpose encryption --project <KEY_PROJECT_ID> + + Grant the Kubernetes Engine Service Agent service account the Cloud KMS + CryptoKey Encrypter/Decrypter role: + + gcloud kms keys add-iam-policy-binding <KEY_NAME> --location <LOCATION> \ + --keyring <RING_NAME> --member serviceAccount:<SERVICE_ACCOUNT_NAME> \ + --role roles/cloudkms.cryptoKeyEncrypterDecrypter --project <KEY_PROJECT_ID> + + To create a new cluster with Application-layer Secrets Encryption: + + gcloud container clusters create <CLUSTER_NAME> --cluster-version=latest \ + --zone <ZONE> \ + --database-encryption-key projects/<KEY_PROJECT_ID>/locations/<LOCATION>/keyRings/<RING_NAME>/cryptoKeys/<KEY_NAME> \ + --project <CLUSTER_PROJECT_ID> + + To enable on an existing cluster: + + gcloud container clusters update <CLUSTER_NAME> --zone <ZONE> \ + --database-encryption-key projects/<KEY_PROJECT_ID>/locations/<LOCATION>/keyRings/<RING_NAME>/cryptoKeys/<KEY_NAME> \ + --project <CLUSTER_PROJECT_ID> + scored: false + + - id: 5.4 + text: "Node Metadata" + checks: + - id: 5.4.1 + text: "Ensure the GKE Metadata Server is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + + gcloud container clusters update <CLUSTER_NAME> --identity-namespace=<PROJECT_ID>.svc.id.goog + + Note that existing Node pools are unaffected. New Node pools default to + --workload-metadata-from-node=GKE_METADATA_SERVER. + + To modify an existing Node pool to enable GKE Metadata Server: + + gcloud container node-pools update <NODE_POOL_NAME> --cluster=<CLUSTER_NAME> \ + --workload-metadata-from-node=GKE_METADATA_SERVER + + Workloads may need modification in order for them to use Workload Identity as + described within: https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity. + scored: false + + - id: 5.5 + text: "Node Configuration and Maintenance" + checks: + - id: 5.5.1 + text: "Ensure Container-Optimized OS (cos_containerd) is used for GKE node images (Automated)" + type: "manual" + remediation: | + Using Command Line: + To set the node image to cos for an existing cluster's Node pool: + + gcloud container clusters upgrade <CLUSTER_NAME> --image-type cos_containerd \ + --zone <COMPUTE_ZONE> --node-pool <POOL_NAME> + scored: false + + - id: 5.5.2 + text: "Ensure Node Auto-Repair is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-repair for an existing cluster's Node pool: + + gcloud container node-pools update <POOL_NAME> --cluster <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> --enable-autorepair + scored: false + + - id: 5.5.3 + text: "Ensure Node Auto-Upgrade is enabled for GKE nodes (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable node auto-upgrade for an existing cluster's Node pool, run the following + command: + + gcloud container node-pools update <POOL_NAME> --cluster <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> --enable-autoupgrade + scored: false + + - id: 5.5.4 + text: "When creating New Clusters - Automate GKE version management using Release Channels (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster by running the following command: + + gcloud container clusters create <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --release-channel <CHANNEL> + + where <CHANNEL> is stable or regular, according to requirements.
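+        # Illustrative sketch (not part of the CIS text): verifying which release
+        # channel an existing cluster is enrolled in; the cluster name and zone
+        # are hypothetical.
+        #   gcloud container clusters describe my-cluster --zone us-central1-a \
+        #     --format="value(releaseChannel.channel)"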
+ scored: false + + - id: 5.5.5 + text: "Ensure Shielded GKE Nodes are Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To migrate an existing cluster, the flag --enable-shielded-nodes needs to be + specified in the cluster update command: + + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --enable-shielded-nodes + scored: false + + - id: 5.5.6 + text: "Ensure Integrity Monitoring for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Integrity Monitoring enabled, run the + following command: + + gcloud container node-pools create <POOL_NAME> --cluster <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> --shielded-integrity-monitoring + + Workloads from existing non-conforming Node pools will need to be migrated to the + newly created Node pool, then delete non-conforming Node pools to complete the + remediation. + scored: false + + - id: 5.5.7 + text: "Ensure Secure Boot for Shielded GKE Nodes is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To create a Node pool within the cluster with Secure Boot enabled, run the following + command: + + gcloud container node-pools create <POOL_NAME> --cluster <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> --shielded-secure-boot + + Workloads will need to be migrated from existing non-conforming Node pools to the + newly created Node pool, then delete the non-conforming pools. + scored: false + + - id: 5.6 + text: "Cluster Networking" + checks: + - id: 5.6.1 + text: "Enable VPC Flow Logs and Intranode Visibility (Automated)" + type: "manual" + remediation: | + Using Command Line: + 1. Find the subnetwork name associated with the cluster. + + gcloud container clusters describe <CLUSTER_NAME> \ + --region <REGION> --format json | jq '.subnetwork' + + 2. Update the subnetwork to enable VPC Flow Logs. + gcloud compute networks subnets update <SUBNET_NAME> --enable-flow-logs + scored: false + + - id: 5.6.2 + text: "Ensure use of VPC-native clusters (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Alias IP on a new cluster, run the following command: + + gcloud container clusters create <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --enable-ip-alias + + If using Autopilot configuration mode: + + gcloud container clusters create-auto <CLUSTER_NAME> \ + --zone <COMPUTE_ZONE> + scored: false + + - id: 5.6.3 + text: "Ensure Control Plane Authorized Networks is Enabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To enable Control Plane Authorized Networks for an existing cluster, run the following + command: + + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --enable-master-authorized-networks + + Along with this, you can list authorized networks using the --master-authorized-networks + flag which contains a list of up to 20 external networks that are allowed to + connect to your cluster's control plane through HTTPS. You provide these networks as + a comma-separated list of addresses in CIDR notation (such as 90.90.100.0/24). + scored: false + + - id: 5.6.4 + text: "Ensure clusters are created with Private Endpoint Enabled and Public Access Disabled (Manual)" + type: "manual" + remediation: | + Using Command Line: + Create a cluster with a Private Endpoint enabled and Public Access disabled by including + the --enable-private-endpoint flag within the cluster create command: + + gcloud container clusters create <CLUSTER_NAME> --enable-private-endpoint + + Setting this flag also requires the setting of --enable-private-nodes, --enable-ip-alias + and --master-ipv4-cidr=<MASTER_CIDR_RANGE>.
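+        # Illustrative sketch (not part of the CIS text): the required flags
+        # combined into one create command; the cluster name, zone and CIDR are
+        # hypothetical (the control-plane CIDR must be a /28 range).
+        #   gcloud container clusters create priv-cluster \
+        #     --zone us-central1-a \
+        #     --enable-private-nodes \
+        #     --enable-private-endpoint \
+        #     --enable-ip-alias \
+        #     --master-ipv4-cidr=172.16.0.32/28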
+ scored: false + + - id: 5.6.5 + text: "Ensure clusters are created with Private Nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + To create a cluster with Private Nodes enabled, include the --enable-private-nodes + flag within the cluster create command: + + gcloud container clusters create <CLUSTER_NAME> --enable-private-nodes + + Setting this flag also requires the setting of --enable-ip-alias and + --master-ipv4-cidr=<MASTER_CIDR_RANGE>. + scored: false + + - id: 5.6.6 + text: "Consider firewalling GKE worker nodes (Manual)" + type: "manual" + remediation: | + Using Command Line: + Use the following command to generate firewall rules, setting the variables as + appropriate: + + gcloud compute firewall-rules create <FIREWALL_RULE_NAME> \ + --network <NETWORK> --priority <PRIORITY> --direction <DIRECTION> \ + --action <ACTION> --target-tags <TAG> \ + --target-service-accounts <SERVICE_ACCOUNT> \ + --source-ranges <SOURCE_CIDR_RANGE> --source-tags <SOURCE_TAGS> \ + --source-service-accounts <SOURCE_SERVICE_ACCOUNT> \ + --destination-ranges <DESTINATION_CIDR_RANGE> --rules <RULES> + scored: false + + - id: 5.6.7 + text: "Ensure use of Google-managed SSL Certificates (Automated)" + type: "manual" + remediation: | + If services of type:LoadBalancer are discovered, consider replacing the Service with + an Ingress. + + To configure the Ingress and use Google-managed SSL certificates, follow the + instructions as listed at: https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs. + scored: false + + - id: 5.7 + text: "Logging" + checks: + - id: 5.7.1 + text: "Ensure Logging and Cloud Monitoring is Enabled (Automated)" + type: "manual" + remediation: | + To enable Logging for an existing cluster, run the following command: + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --logging=<COMPONENTS_TO_LOG> + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--logging + for a list of available components for logging. + + To enable Cloud Monitoring for an existing cluster, run the following command: + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --monitoring=<COMPONENTS_TO_MONITOR> + + See https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--monitoring + for a list of available components for Cloud Monitoring. + scored: false + + - id: 5.7.2 + text: "Enable Linux auditd logging (Manual)" + type: "manual" + remediation: | + Using Command Line: + Download the example manifests: + curl https://raw.githubusercontent.com/GoogleCloudPlatform/k8s-node-tools/master/os-audit/cos-auditd-logging.yaml > cos-auditd-logging.yaml + + Edit the example manifests if needed. Then, deploy them: + kubectl apply -f cos-auditd-logging.yaml + + Verify that the logging Pods have started. If a different Namespace was defined in the + manifests, replace cos-auditd with the name of the namespace being used: + kubectl get pods --namespace=cos-auditd + scored: false + + - id: 5.8 + text: "Authentication and Authorization" + checks: + - id: 5.8.1 + text: "Ensure authentication using Client Certificates is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new cluster without a Client Certificate: + gcloud container clusters create [CLUSTER_NAME] \ + --no-issue-client-certificate + scored: false + + - id: 5.8.2 + text: "Manage Kubernetes RBAC users with Google Groups for GKE (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the G Suite Groups instructions at: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#google-groups-for-gke.
+ + Then, create a cluster with: + gcloud container clusters create <CLUSTER_NAME> --security-group <SECURITY_GROUP_NAME> + + Finally create Roles, ClusterRoles, RoleBindings, and ClusterRoleBindings that + reference the G Suite Groups. + scored: false + + - id: 5.8.3 + text: "Ensure Legacy Authorization (ABAC) is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable Legacy Authorization for an existing cluster, run the following command: + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --no-enable-legacy-authorization + scored: false + + - id: 5.9 + text: "Storage" + checks: + - id: 5.9.1 + text: "Enable Customer-Managed Encryption Keys (CMEK) for GKE Persistent Disks (PD) (Manual)" + type: "manual" + remediation: | + Using Command Line: + Follow the instructions detailed at: https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek. + scored: false + + - id: 5.9.2 + text: "Enable Customer-Managed Encryption Keys (CMEK) for Boot Disks (Automated)" + type: "manual" + remediation: | + Using Command Line: + Create a new node pool using customer-managed encryption keys for the node boot + disk, of either pd-standard or pd-ssd: + gcloud container node-pools create <POOL_NAME> --disk-type <DISK_TYPE> \ + --boot-disk-kms-key projects/<KEY_PROJECT_ID>/locations/<LOCATION>/keyRings/<RING_NAME>/cryptoKeys/<KEY_NAME> + + Create a cluster using customer-managed encryption keys for the node boot disk, of + either pd-standard or pd-ssd: + gcloud container clusters create <CLUSTER_NAME> --disk-type <DISK_TYPE> \ + --boot-disk-kms-key projects/<KEY_PROJECT_ID>/locations/<LOCATION>/keyRings/<RING_NAME>/cryptoKeys/<KEY_NAME> + scored: false + + - id: 5.10 + text: "Other Cluster Configurations" + checks: + - id: 5.10.1 + text: "Ensure Kubernetes Web UI is Disabled (Automated)" + type: "manual" + remediation: | + Using Command Line: + To disable the Kubernetes Dashboard on an existing cluster, run the following + command: + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --update-addons=KubernetesDashboard=DISABLED + scored: false + + - id: 5.10.2 + text: "Ensure that Alpha clusters are not used for production workloads (Automated)" + type: "manual" + remediation: | + Using Command Line: + Upon creating a new cluster + gcloud container clusters create [CLUSTER_NAME] \ + --zone [COMPUTE_ZONE] + + Do not use the --enable-kubernetes-alpha argument. + scored: false + + - id: 5.10.3 + text: "Consider GKE Sandbox for running untrusted workloads (Manual)" + type: "manual" + remediation: | + Using Command Line: + To enable GKE Sandbox on an existing cluster, a new Node pool must be created, + which can be done using: + gcloud container node-pools create <POOL_NAME> --zone <COMPUTE_ZONE> \ + --cluster <CLUSTER_NAME> --image-type=cos_containerd --sandbox="type=gvisor" + scored: false + + - id: 5.10.4 + text: "Ensure use of Binary Authorization (Automated)" + type: "manual" + remediation: | + Using Command Line: + Update the cluster to enable Binary Authorization: + gcloud container clusters update <CLUSTER_NAME> --zone <COMPUTE_ZONE> \ + --binauthz-evaluation-mode=<EVALUATION_MODE> + + Example: + gcloud container clusters update $CLUSTER_NAME --zone $COMPUTE_ZONE \ + --binauthz-evaluation-mode=PROJECT_SINGLETON_POLICY_ENFORCE + + See: https://cloud.google.com/sdk/gcloud/reference/container/clusters/update#--binauthz-evaluation-mode + for more details around the evaluation modes available. + + Create a Binary Authorization Policy using the Binary Authorization Policy Reference: + https://cloud.google.com/binary-authorization/docs/policy-yaml-reference for guidance.
+ + Import the policy file into Binary Authorization: + gcloud container binauthz policy import <YAML_POLICY_FILE> + scored: false + + - id: 5.10.5 + text: "Enable Security Posture (Manual)" + type: "manual" + remediation: | + Enable security posture via the UI, gcloud or API. + https://cloud.google.com/kubernetes-engine/docs/how-to/protect-workload-configuration + scored: false diff --git a/cfg/gke-1.6.0/master.yaml b/cfg/gke-1.6.0/master.yaml new file mode 100644 index 000000000..9686bf2f8 --- /dev/null +++ b/cfg/gke-1.6.0/master.yaml @@ -0,0 +1,6 @@ +--- +controls: +version: "gke-1.6.0" +id: 1 +text: "Control Plane Components" +type: "master" diff --git a/cfg/gke-1.6.0/node.yaml b/cfg/gke-1.6.0/node.yaml new file mode 100644 index 000000000..bac80720e --- /dev/null +++ b/cfg/gke-1.6.0/node.yaml @@ -0,0 +1,506 @@ +--- +controls: +version: "gke-1.6.0" +id: 3 +text: "Worker Node Security Configuration" +type: "node" +groups: + - id: 3.1 + text: "Worker Node Configuration Files" + checks: + - id: 3.1.1 + text: "Ensure that the proxy kubeconfig file permissions are set to 644 or more restrictive (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "644" + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example, + + chmod 644 $proxykubeconfig + scored: true + + - id: 3.1.2 + text: "Ensure that the proxy kubeconfig file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the below command (based on the file location on your system) on each worker node. + For example: + + chown root:root $proxykubeconfig + scored: true + + - id: 3.1.3 + text: "Ensure that the kubelet configuration file has permissions set to 600 (Manual)" + audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c permissions=%a /home/kubernetes/kubelet-config.yaml; fi'' ' + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the following command (using the kubelet config file location) + + chmod 600 /home/kubernetes/kubelet-config.yaml + scored: true + + - id: 3.1.4 + text: "Ensure that the kubelet configuration file ownership is set to root:root (Manual)" + audit: '/bin/sh -c ''if test -e /home/kubernetes/kubelet-config.yaml; then stat -c %U:%G /home/kubernetes/kubelet-config.yaml; fi'' ' + tests: + test_items: + - flag: root:root + remediation: | + Run the following command (using the config file location identified in the Audit step) + + chown root:root /home/kubernetes/kubelet-config.yaml + scored: true + + - id: 3.2 + text: "Kubelet" + checks: + - id: 3.2.1 + text: "Ensure that the Anonymous Auth is Not Enabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--anonymous-auth" + path: '{.authentication.anonymous.enabled}' + compare: + op: eq + value: false + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file.
To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /home/kubernetes/kubelet-config.yaml + + Disable Anonymous Authentication by setting the following parameter: + + "authentication": { "anonymous": { "enabled": false } } + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --anonymous-auth=false + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.2 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --authorization-mode + path: '{.authorization.mode}' + compare: + op: nothave + value: AlwaysAllow + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Enable Webhook Authentication by setting the following parameter: + + "authentication": { "webhook": { "enabled": true } } + + Next, set the Authorization Mode to Webhook by setting the following parameter: + + "authorization": { "mode": "Webhook" } + + Finer detail of the authentication and authorization fields can be found in the + Kubelet Configuration documentation (https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf.
Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --authentication-token-webhook + --authorization-mode=Webhook + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command. If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.3 + text: "Ensure that a Client CA File is Configured (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --client-ca-file + path: '{.authentication.x509.clientCAFile}' + set: true + remediation: | + Remediation Method 1: + If configuring via the Kubelet config file, you first need to locate the file. + To do this, SSH to each node and execute the following command to find the kubelet + process: + + ps -ef | grep kubelet + + The output of the above command provides details of the active kubelet process, from + which we can see the location of the configuration file provided to the kubelet service + with the --config argument. The file can be viewed with a command such as more or + less, like so: + + sudo less /path/to/kubelet-config.json + + Configure the client certificate authority file by setting the following parameter + appropriately: + + "authentication": { "x509": { "clientCAFile": "<path/to/client-ca-file>" } } + + Remediation Method 2: + If using executable arguments, edit the kubelet service file on each worker node and + ensure the below parameters are part of the KUBELET_ARGS variable string. + For systems using systemd, such as the Amazon EKS Optimised Amazon Linux or + Bottlerocket AMIs, then this file can be found at + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf. Otherwise, + you may need to look up documentation for your chosen operating system to determine + which service manager is configured: + + --client-ca-file=<path/to/client-ca-file> + + For Both Remediation Steps: + Based on your system, restart the kubelet service and check the service status. + The following example is for operating systems using systemd, such as the Amazon + EKS Optimised Amazon Linux or Bottlerocket AMIs, and invokes the systemctl + command.
If systemctl is not available then you will need to look up documentation for + your chosen operating system to determine which service manager is configured: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.4 + text: "Ensure that the --read-only-port argument is disabled (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: "--read-only-port" + path: '{.readOnlyPort}' + set: false + - flag: "--read-only-port" + path: '{.readOnlyPort}' + compare: + op: eq + value: 0 + bin_op: or + remediation: | + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to 0 + + "readOnlyPort": 0 + + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --read-only-port=0 + + For each remediation: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.5 + text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + compare: + op: noteq + value: 0 + - flag: --streaming-connection-idle-timeout + path: '{.streamingConnectionIdleTimeout}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /etc/kubernetes/kubelet-config.yaml and set the below parameter to a non-zero + value in the format of #h#m#s + + "streamingConnectionIdleTimeout": "4h0m0s" + + You should ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not + specify a --streaming-connection-idle-timeout argument because it would + override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --streaming-connection-idle-timeout=4h0m0s + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "streamingConnectionIdleTimeout": by extracting the live configuration from the + nodes running kubelet.
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.6 + text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + compare: + op: eq + value: true + - flag: --make-iptables-util-chains + path: '{.makeIPTablesUtilChains}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.json file + /etc/kubernetes/kubelet/kubelet-config.json and set the below parameter to + true + + "makeIPTablesUtilChains": true + + Ensure that /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf + does not set the --make-iptables-util-chains argument because that would + override your Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --make-iptables-util-chains=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "makeIPTablesUtilChains": true by extracting the live configuration from the nodes + running kubelet. + + **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediations: + Based on your system, restart the kubelet service and check status + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true + + - id: 3.2.7 + text: "Ensure that the --eventRecordQPS argument is set to 0 or a level which ensures appropriate event capture (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf" + tests: + test_items: + - flag: --event-qps + path: '{.eventRecordQPS}' + set: true + compare: + op: eq + value: 0 + remediation: | + If using a Kubelet config file, edit the file to set eventRecordQPS to an appropriate level.
+ + If using command line arguments, edit the kubelet service file /etc/systemd/system/kubelet.service.d/10-kubeadm.conf + on each worker node and set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. + + --event-qps=0 + + Based on your system, restart the kubelet service. For example: + + systemctl daemon-reload + systemctl restart kubelet.service + scored: true + + - id: 3.2.8 + text: "Ensure that the --rotate-certificates argument is not present or is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: --rotate-certificates + path: '{.rotateCertificates}' + compare: + op: eq + value: true + - flag: --rotate-certificates + path: '{.rotateCertificates}' + set: false + bin_op: or + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet-config.yaml file + /etc/kubernetes/kubelet/kubelet-config.yaml and set the below parameter to + true + + "rotateCertificates": true + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set the --rotate-certificates + executable argument to false because this would override the Kubelet + config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-certificates=true + scored: true + + - id: 3.2.9 + text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" + audit: "/bin/ps -fC $kubeletbin" + audit_config: "/bin/cat /home/kubernetes/kubelet-config.yaml" + tests: + test_items: + - flag: RotateKubeletServerCertificate + path: '{.featureGates.RotateKubeletServerCertificate}' + compare: + op: eq + value: true + remediation: | + Remediation Method 1: + If modifying the Kubelet config file, edit the kubelet config file + /etc/kubernetes/kubelet-config.yaml and set the below parameter to true + + "featureGates": { + "RotateKubeletServerCertificate":true + }, + + Additionally, ensure that the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf does not set + the --rotate-kubelet-server-certificate executable argument to false because + this would override the Kubelet config file. + + Remediation Method 2: + If using executable arguments, edit the kubelet service file + /etc/systemd/system/kubelet.service.d/10-kubelet-args.conf on each + worker node and add the below parameter at the end of the KUBELET_ARGS variable + string. + + --rotate-kubelet-server-certificate=true + + Remediation Method 3: + If using the api configz endpoint consider searching for the status of + "RotateKubeletServerCertificate": by extracting the live configuration from the + nodes running kubelet.
+ **See detailed step-by-step configmap procedures in Reconfigure a Node's Kubelet in a + Live Cluster (https://kubernetes.io/docs/tasks/administer-cluster/reconfigure-kubelet/), + and then rerun the curl statement from audit process to check for kubelet + configuration changes + + kubectl proxy --port=8001 & + export HOSTNAME_PORT=localhost:8001 (example host and port number) + export NODE_NAME=gke-cluster-1-pool1-5e572947-r2hg (example node name from + "kubectl get nodes") + curl -sSL "http://${HOSTNAME_PORT}/api/v1/nodes/${NODE_NAME}/proxy/configz" + + For all three remediation methods: + Restart the kubelet service and check status. The example below is for when using + systemctl to manage services: + + systemctl daemon-reload + systemctl restart kubelet.service + systemctl status kubelet -l + scored: true diff --git a/cfg/gke-1.6.0/policies.yaml b/cfg/gke-1.6.0/policies.yaml new file mode 100644 index 000000000..333335536 --- /dev/null +++ b/cfg/gke-1.6.0/policies.yaml @@ -0,0 +1,238 @@ +--- +controls: +version: "gke-1.6.0" +id: 4 +text: "Kubernetes Policies" +type: "policies" +groups: + - id: 4.1 + text: "RBAC and Service Accounts" + checks: + - id: 4.1.1 + text: "Ensure that the cluster-admin role is only used where required (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings to the cluster-admin role. Check if they are used and + if they need this role or if they could use a role with fewer privileges. + + Where possible, first bind users to a lower privileged role and then remove the + clusterrolebinding to the cluster-admin role : + + kubectl delete clusterrolebinding [name] + scored: false + + - id: 4.1.2 + text: "Minimize access to secrets (Automated)" + type: "manual" + remediation: | + Where possible, remove get, list and watch access to secret objects in the cluster. + scored: false + + - id: 4.1.3 + text: "Minimize wildcard use in Roles and ClusterRoles (Automated)" + type: "manual" + remediation: | + Where possible replace any use of wildcards in clusterroles and roles with specific + objects or actions. + scored: false + + - id: 4.1.4 + text: "Ensure that default service accounts are not actively used (Automated)" + type: "manual" + remediation: | + Create explicit service accounts wherever a Kubernetes workload requires specific + access to the Kubernetes API server. + + Modify the configuration of each default service account to include this value + + automountServiceAccountToken: false + scored: false + + - id: 4.1.5 + text: "Ensure that Service Account Tokens are only mounted where necessary (Automated)" + type: "manual" + remediation: | + Modify the definition of pods and service accounts which do not need to mount service + account tokens to disable it. + scored: false + + - id: 4.1.6 + text: "Avoid use of system:masters group (Automated)" + type: "manual" + remediation: | + Remove the system:masters group from all users in the cluster. + scored: false + + - id: 4.1.7 + text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)" + type: "manual" + remediation: | + Where possible, remove the impersonate, bind and escalate rights from subjects. + scored: false + + - id: 4.1.8 + text: "Avoid bindings to system:anonymous (Automated)" + type: "manual" + remediation: | + Identify all clusterrolebindings and rolebindings to the user system:anonymous. 
+ Check if they are used and review the permissions associated with the binding using the + commands in the Audit section above or refer to GKE documentation + (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing unsafe bindings with an authenticated, user-defined group. + Where possible, bind to non-default, user-defined groups with least-privilege roles. + + If there are any unsafe bindings to the user system:anonymous, proceed to delete them + after consideration for cluster operations with only necessary, safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.9 + text: "Avoid non-default bindings to system:unauthenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:unauthenticated. Check if they are used and review the permissions + associated with the binding using the commands in the Audit section above or refer to + GKE documentation (https://cloud.google.com/kubernetes-engine/docs/best-practices/rbac#detect-prevent-default). + + Strongly consider replacing non-default, unsafe bindings with an authenticated, + user-defined group. Where possible, bind to non-default, user-defined groups with + least-privilege roles. + + If there are any non-default, unsafe bindings to the group system:unauthenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.1.10 + text: "Avoid non-default bindings to system:authenticated (Automated)" + type: "manual" + remediation: | + Identify all non-default clusterrolebindings and rolebindings to the group + system:authenticated. Check if they are used and review the permissions associated + with the binding using the commands in the Audit section above or refer to GKE + documentation. + + Strongly consider replacing non-default, unsafe bindings with an authenticated, + user-defined group. Where possible, bind to non-default, user-defined groups with + least-privilege roles. + + If there are any non-default, unsafe bindings to the group system:authenticated, + proceed to delete them after consideration for cluster operations with only necessary, + safer bindings. + + kubectl delete clusterrolebinding [CLUSTER_ROLE_BINDING_NAME] + kubectl delete rolebinding [ROLE_BINDING_NAME] --namespace [ROLE_BINDING_NAMESPACE] + scored: false + + - id: 4.2 + text: "Pod Security Standards" + checks: + - id: 4.2.1 + text: "Ensure that the cluster enforces Pod Security Standard Baseline profile or stricter for all namespaces. (Manual)" + type: "manual" + remediation: | + Ensure that Pod Security Admission is in place for every namespace which contains + user workloads. + Run the following command to enforce the Baseline profile in a namespace: + + kubectl label namespace [namespace] pod-security.kubernetes.io/enforce=baseline + scored: false + + - id: 4.3 + text: "Network Policies and CNI" + checks: + - id: 4.3.1 + text: "Ensure that the CNI in use supports Network Policies (Manual)" + type: "manual" + remediation: | + To use a CNI plugin with Network Policy, enable Network Policy in GKE, and the CNI plugin + will be updated. See Recommendation 5.6.7.
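Once the CNI supports Network Policies, check 4.3.2 below asks for NetworkPolicy objects in every namespace. A common starting point, sketched here with a hypothetical namespace name, is a default-deny ingress policy that selects every pod:

    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: default-deny-ingress     # hypothetical name
      namespace: my-app              # hypothetical namespace
    spec:
      podSelector: {}                # empty selector matches all pods in the namespace
      policyTypes:
        - Ingress                    # no ingress rules listed, so all ingress is denied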
+ scored: false + + - id: 4.3.2 + text: "Ensure that all Namespaces have Network Policies defined (Automated)" + type: "manual" + remediation: | + Follow the documentation and create NetworkPolicy objects as needed. + See: https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy#creating_a_network_policy + for more information. + scored: false + + - id: 4.4 + text: "Secrets Management" + checks: + - id: 4.4.1 + text: "Prefer using secrets as files over secrets as environment variables (Automated)" + type: "manual" + remediation: | + If possible, rewrite application code to read secrets from mounted secret files, rather than + from environment variables. + scored: false + + - id: 4.4.2 + text: "Consider external secret storage (Manual)" + type: "manual" + remediation: | + Refer to the secrets management options offered by your cloud provider or a third-party + secrets management solution. + scored: false + + - id: 4.5 + text: "Extensible Admission Control" + checks: + - id: 4.5.1 + text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and set up image provenance. + Also see recommendation 5.10.4. + scored: false + + - id: 4.6 + text: "General Policies" + checks: + - id: 4.6.1 + text: "Create administrative boundaries between resources using namespaces (Manual)" + type: "manual" + remediation: | + Follow the documentation and create namespaces for objects in your deployment as you need + them. + scored: false + + - id: 4.6.2 + text: "Ensure that the seccomp profile is set to RuntimeDefault in your pod definitions (Automated)" + type: "manual" + remediation: | + Use security context to enable the RuntimeDefault seccomp profile in your pod + definitions. An example is as below: + + securityContext: + seccompProfile: + type: RuntimeDefault + scored: false + + - id: 4.6.3 + text: "Apply Security Context to Your Pods and Containers (Manual)" + type: "manual" + remediation: | + Follow the Kubernetes documentation and apply security contexts to your pods. For a + suggested list of security contexts, you may refer to the CIS Google + Container-Optimized OS Benchmark. + scored: false + + - id: 4.6.4 + text: "The default namespace should not be used (Automated)" + type: "manual" + remediation: | + Ensure that namespaces are created to allow for appropriate segregation of Kubernetes + resources and that all new resources are created in a specific namespace.
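Tying this back to check 4.2.1 above, a dedicated namespace can be created with a Pod Security Standard label already applied. A minimal sketch, with a hypothetical namespace name:

    apiVersion: v1
    kind: Namespace
    metadata:
      name: team-frontend            # hypothetical name
      labels:
        pod-security.kubernetes.io/enforce: baseline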
+ scored: false diff --git a/cfg/k3s-cis-1.23/controlplane.yaml b/cfg/k3s-cis-1.23/controlplane.yaml index 1aec579e5..aeee0a77c 100644 --- a/cfg/k3s-cis-1.23/controlplane.yaml +++ b/cfg/k3s-cis-1.23/controlplane.yaml @@ -21,7 +21,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" type: "manual" tests: test_items: diff --git a/cfg/k3s-cis-1.23/master.yaml b/cfg/k3s-cis-1.23/master.yaml index a03bca99c..0209dc73e 100644 --- a/cfg/k3s-cis-1.23/master.yaml +++ b/cfg/k3s-cis-1.23/master.yaml @@ -323,7 +323,7 @@ groups: checks: - id: 1.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" type: manual tests: test_items: @@ -371,7 +371,7 @@ groups: - id: 1.2.4 text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" type: "skip" tests: bin_op: or @@ -389,7 +389,7 @@ groups: - id: 1.2.5 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: bin_op: and test_items: @@ -406,7 +406,7 @@ groups: - id: 1.2.6 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: test_items: - flag: "--kubelet-certificate-authority" @@ -420,7 +420,7 @@ groups: - id: 1.2.7 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -436,7 +436,7 @@ groups: - id: 1.2.8 text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -451,7 +451,7 @@ groups: - id: 1.2.9 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: 
"--authorization-mode" @@ -466,7 +466,7 @@ groups: - id: 1.2.10 text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -483,7 +483,7 @@ groups: - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -517,7 +517,7 @@ groups: - id: 1.2.13 text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -538,7 +538,7 @@ groups: - id: 1.2.14 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -557,7 +557,7 @@ groups: - id: 1.2.15 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -575,7 +575,7 @@ groups: - id: 1.2.16 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -592,7 +592,7 @@ groups: - id: 1.2.17 text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" tests: bin_op: or test_items: @@ -610,7 +610,7 @@ groups: - id: 1.2.18 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -625,7 +625,7 @@ groups: - id: 1.2.19 text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -639,7 +639,7 @@ groups: - id: 1.2.20 text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate 
(Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -656,7 +656,7 @@ groups: - id: 1.2.21 text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -673,7 +673,7 @@ groups: - id: 1.2.22 text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -689,7 +689,7 @@ groups: - id: 1.2.23 text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -702,7 +702,7 @@ groups: - id: 1.2.24 text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" tests: bin_op: or test_items: @@ -722,7 +722,7 @@ groups: - id: 1.2.25 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" type: "skip" tests: test_items: @@ -736,7 +736,7 @@ groups: - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" + audit: "journalctl -m -u k3s | grep -m1 'Running kube-apiserver'" tests: bin_op: and test_items: @@ -754,7 +754,7 @@ groups: - id: 1.2.27 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" tests: bin_op: and test_items: @@ -772,7 +772,7 @@ groups: - id: 1.2.28 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" tests: test_items: - flag: "--client-ca-file" @@ -785,7 +785,7 @@ groups: - id: 1.2.29 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" tests: test_items: - flag: "--etcd-cafile" @@ -798,7 +798,7 @@ groups: - id: 1.2.30 text: "Ensure 
that the --encryption-provider-config argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" tests: test_items: - flag: "--encryption-provider-config" @@ -820,7 +820,7 @@ groups: - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" tests: test_items: - flag: "--tls-cipher-suites" @@ -845,7 +845,7 @@ groups: checks: - id: 1.3.1 text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" tests: test_items: - flag: "--terminated-pod-gc-threshold" @@ -857,7 +857,7 @@ groups: - id: 1.3.2 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -872,7 +872,7 @@ groups: - id: 1.3.3 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" tests: test_items: - flag: "--use-service-account-credentials" @@ -887,7 +887,7 @@ groups: - id: 1.3.4 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" tests: test_items: - flag: "--service-account-private-key-file" @@ -900,7 +900,7 @@ groups: - id: 1.3.5 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" tests: test_items: - flag: "--root-ca-file" @@ -912,7 +912,7 @@ groups: - id: 1.3.6 text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'" type: "skip" tests: bin_op: or @@ -953,7 +953,7 @@ groups: checks: - id: 1.4.1 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 
'Running kube-scheduler' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1" tests: test_items: - flag: "--profiling" @@ -969,7 +969,7 @@ groups: - id: 1.4.2 text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" + audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'" tests: bin_op: or test_items: diff --git a/cfg/k3s-cis-1.23/node.yaml b/cfg/k3s-cis-1.23/node.yaml index c0b60dfe2..9c20b229e 100644 --- a/cfg/k3s-cis-1.23/node.yaml +++ b/cfg/k3s-cis-1.23/node.yaml @@ -186,7 +186,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -209,7 +209,7 @@ groups: - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' tests: test_items: - flag: --authorization-mode @@ -231,7 +231,7 @@ groups: - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file @@ -251,7 +251,7 @@ groups: - id: 4.2.4 text: "Ensure that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " tests: bin_op: or test_items: @@ -276,7 +276,7 @@ groups: - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + 
audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -302,7 +302,7 @@ groups: - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" type: "skip" tests: test_items: @@ -325,7 +325,7 @@ groups: - id: 4.2.7 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" type: "skip" tests: test_items: @@ -393,7 +393,7 @@ groups: - id: 4.2.10 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + audit: "journalctl -m -u k3s | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --tls-cert-file @@ -477,7 +477,7 @@ groups: op: valid_elements value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 remediation: | - If using a Kubelet config file, edit the file to set `TLSCipherSuites` to + If using a Kubelet config file, edit the file to set `tlsCipherSuites` to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. 
If using executable arguments, edit the kubelet service file diff --git a/cfg/k3s-cis-1.24/config.yaml b/cfg/k3s-cis-1.24/config.yaml index cafe7019a..4adbac3a1 100644 --- a/cfg/k3s-cis-1.24/config.yaml +++ b/cfg/k3s-cis-1.24/config.yaml @@ -16,33 +16,43 @@ master: scheduler: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig controllermanager: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig + etcd: bins: - containerd - datadirs: - - /var/lib/rancher/k3s/server/db/etcd - - node: - components: - - kubelet - - proxy - - kubelet: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig - defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt - - proxy: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - - policies: - components: - - policies + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.24/controlplane.yaml b/cfg/k3s-cis-1.24/controlplane.yaml index fd9766efb..471017bd7 100644 --- a/cfg/k3s-cis-1.24/controlplane.yaml +++ b/cfg/k3s-cis-1.24/controlplane.yaml @@ -21,7 +21,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" tests: test_items: - flag: "--audit-policy-file" diff --git a/cfg/k3s-cis-1.24/etcd.yaml b/cfg/k3s-cis-1.24/etcd.yaml index 40af92e65..cb57781c0 100644 --- a/cfg/k3s-cis-1.24/etcd.yaml +++ b/cfg/k3s-cis-1.24/etcd.yaml @@ -10,128 +10,135 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files.
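For reference, the stanza of $etcdconf that the rewritten check 2.1 inspects would look like the sketch below when K3s generates it; the paths are the expected values from the check, and the rest of the file is omitted:

    client-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/server-client.key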
+ scored: false - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.client-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--auto-tls" - set: false - - flag: "--auto-tls" + - path: "{.client-transport-security.auto-tls}" compare: op: eq value: false + - path: "{.client-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true + client-transport-security: + auto-tls: false + scored: false - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files.
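Analogously, the peer stanza of $etcdconf inspected by check 2.4, and by check 2.5 below, would look like this sketch; the paths and the client-cert-auth value are taken from the checks' expected values:

    peer-transport-security:
      cert-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt
      key-file: /var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key
      client-cert-auth: true         # verified by check 2.5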
+ scored: false - id: 2.5 text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.peer-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --peer-client-cert-auth=true - scored: true + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable peer client certificate authentication. + scored: false - id: 2.6 text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--peer-auto-tls" - set: false - - flag: "--peer-auto-tls" + - path: "{.peer-transport-security.auto-tls}" compare: op: eq value: false - set: true + - path: "{.peer-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --peer-auto-tls parameter or set it to false. - --peer-auto-tls=false - scored: true + peer-transport-security: + auto-tls: false + scored: false - id: 2.7 text: "Ensure that a unique Certificate Authority is used for etcd (Automated)" - audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: test_items: - - flag: "trusted-ca-file" - set: true + - path: "{.peer-transport-security.trusted-ca-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt" remediation: | - [Manual test] - Follow the etcd documentation and create a dedicated certificate authority setup for the - etcd service. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameter. - --trusted-ca-file= + If running with SQLite or an external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates a unique certificate authority for etcd. + This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use a shared certificate authority. scored: false diff --git a/cfg/k3s-cis-1.24/master.yaml b/cfg/k3s-cis-1.24/master.yaml index 6af44c7a5..5ff0318c9 100644 --- a/cfg/k3s-cis-1.24/master.yaml +++ b/cfg/k3s-cis-1.24/master.yaml @@ -19,9 +19,8 @@ groups: op: bitmask value: "644" remediation: | - Run the below command (based on the file location on your system) on the - control plane node. - For example, chmod 644 $apiserverconf + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
scored: true - id: 1.1.2 @@ -32,8 +31,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $apiserverconf + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. scored: true - id: 1.1.3 @@ -47,8 +46,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $controllermanagerconf + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.4 @@ -59,8 +58,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $controllermanagerconf + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. scored: true - id: 1.1.5 @@ -74,8 +73,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 $schedulerconf + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.6 @@ -86,8 +85,8 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chown root:root $schedulerconf + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. scored: true - id: 1.1.7 @@ -101,9 +100,8 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chmod 600 $etcdconf + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.8 @@ -114,17 +112,14 @@ groups: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root $etcdconf + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. scored: true - id: 1.1.9 text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a type: "skip" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a use_multiple_values: true tests: test_items: @@ -133,36 +128,37 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, chmod 600 + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. 
scored: false - id: 1.1.10 text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G\ type: "skip" - audit: | - ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G - find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G use_multiple_values: true tests: test_items: - flag: "root:root" remediation: | - Run the below command (based on the file location on your system) on the control plane node. - For example, - chown root:root - scored: false + Not Applicable. + The default K3s CNI, flannel, does not create any files in /var/lib/cni/networks. + scored: true - id: 1.1.11 text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" - audit: "stat -c %a $etcddatadir" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi tests: test_items: - - flag: "700" + - flag: "permissions" compare: - op: eq + op: bitmask value: "700" - set: true remediation: | On the etcd server node, get the etcd data directory, passed as an argument --data-dir, from the command 'ps -ef | grep etcd'. @@ -178,15 +174,14 @@ groups: test_items: - flag: "etcd:etcd" remediation: | - On the etcd server node, get the etcd data directory, passed as an argument --data-dir, - from the command 'ps -ef | grep etcd'. - Run the below command (based on the etcd data directory found above). - For example, chown etcd:etcd /var/lib/etcd + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. scored: true - id: 1.1.13 text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" type: "skip" tests: test_items: @@ -212,12 +207,12 @@ groups: set: true remediation: | Run the below command (based on the file location on your system) on the control plane node. 
- For example, chown root:root /etc/kubernetes/admin.conf + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig scored: true - id: 1.1.15 text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -232,7 +227,7 @@ groups: - id: 1.1.16 text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" tests: test_items: - flag: "root:root" @@ -244,7 +239,7 @@ groups: - id: 1.1.17 text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" - audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" tests: test_items: - flag: "permissions" @@ -287,8 +282,8 @@ groups: scored: true - id: 1.1.20 - text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Automated)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt" + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" use_multiple_values: true tests: test_items: @@ -297,14 +292,14 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. For example, - chmod -R 600 /etc/kubernetes/pki/*.crt + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt scored: false - id: 1.1.21 text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" - audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" use_multiple_values: true tests: test_items: @@ -313,17 +308,17 @@ groups: op: bitmask value: "600" remediation: | - Run the below command (based on the file location on your system) on the control plane node. + Run the below command (based on the file location on your system) on the master node. 
For example, - chmod -R 600 /etc/kubernetes/pki/*.key - scored: false + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true - id: 1.2 text: "API Server" checks: - id: 1.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" tests: test_items: - flag: "--anonymous-auth" @@ -331,27 +326,29 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --anonymous-auth=false - scored: false + By default, K3s sets the --anonymous-auth argument to false. If it is set to true, + edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "anonymous-auth=true" + scored: true - id: 1.2.2 text: "Ensure that the --token-auth-file parameter is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: test_items: - flag: "--token-auth-file" set: false remediation: | - Follow the documentation and configure alternate mechanisms for authentication. Then, - edit the API server pod specification file $apiserverconf - on the control plane node and remove the --token-auth-file= parameter. + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" scored: true - id: 1.2.3 text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -359,18 +356,18 @@ groups: compare: op: nothave value: "DenyServiceExternalIPs" - set: true - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and remove the `DenyServiceExternalIPs` - from enabled admission plugins. + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" scored: true - id: 1.2.4 text: "Ensure that the --kubelet-https argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-https'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" type: "skip" tests: bin_op: or @@ -388,24 +385,25 @@ groups: - id: 1.2.5 text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: bin_op: and test_items: - flag: "--kubelet-client-certificate" - flag: "--kubelet-client-key" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the - apiserver and kubelets. 
Then, edit API server pod specification file - $apiserverconf on the control plane node and set the - kubelet client certificate and key parameters as below. - --kubelet-client-certificate= - --kubelet-client-key= + By default, K3s automatically provides the kubelet client certificate and key. + They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" scored: true - id: 1.2.6 text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" tests: test_items: - flag: "--kubelet-certificate-authority" @@ -419,7 +417,7 @@ groups: - id: 1.2.7 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -427,15 +425,15 @@ groups: op: nothave value: "AlwaysAllow" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow. - One such example could be as below. - --authorization-mode=RBAC + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" scored: true - id: 1.2.8 text: "Ensure that the --authorization-mode argument includes Node (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -443,14 +441,14 @@ groups: op: has value: "Node" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes Node. - --authorization-mode=Node,RBAC + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. 
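Since many of these remediations point at the same file, a consolidated sketch of /etc/rancher/k3s/config.yaml may help; the value shown simply mirrors the K3s default described above rather than overriding it:

    # Sketch of /etc/rancher/k3s/config.yaml; omit kube-apiserver-arg entirely to keep defaults
    kube-apiserver-arg:
      - "authorization-mode=Node,RBAC"   # includes Node (1.2.8) and RBAC (1.2.9)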
scored: true - id: 1.2.9 text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" tests: test_items: - flag: "--authorization-mode" @@ -458,14 +456,14 @@ groups: op: has value: "RBAC" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --authorization-mode parameter to a value that includes RBAC, - for example `--authorization-mode=Node,RBAC`. + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. scored: true - id: 1.2.10 - text: "Ensure that the admission control plugin EventRateLimit is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -474,15 +472,15 @@ groups: value: "EventRateLimit" remediation: | Follow the Kubernetes documentation and set the desired limits in a configuration file. - Then, edit the API server pod specification file $apiserverconf - and set the below parameters. - --enable-admission-plugins=...,EventRateLimit,... - --admission-control-config-file= + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." + - "admission-control-config-file=" scored: false - id: 1.2.11 text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -493,14 +491,15 @@ groups: - flag: "--enable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a - value that does not include AlwaysAdmit. + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" scored: true - id: 1.2.12 - text: "Ensure that the admission control plugin AlwaysPullImages is set (Automated)" - audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -508,15 +507,18 @@ groups: op: has value: "AlwaysPullImages" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to include - AlwaysPullImages. - --enable-admission-plugins=...,AlwaysPullImages,... 
+ Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." scored: false - id: 1.2.13 - text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: bin_op: or test_items: @@ -537,7 +539,7 @@ groups: - id: 1.2.14 text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'ServiceAccount'" tests: bin_op: or test_items: @@ -548,15 +550,16 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. Follow the documentation and create ServiceAccount objects as per your environment. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and ensure that the --disable-admission-plugins parameter is set to a - value that does not include ServiceAccount. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" scored: true - id: 1.2.15 text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -567,14 +570,15 @@ groups: - flag: "--disable-admission-plugins" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --disable-admission-plugins parameter to - ensure it does not include NamespaceLifecycle. + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." scored: true - id: 1.2.16 text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" tests: test_items: - flag: "--enable-admission-plugins" @@ -582,16 +586,16 @@ groups: op: has value: "NodeRestriction" remediation: | - Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets. 
- Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --enable-admission-plugins parameter to a - value that includes NodeRestriction. - --enable-admission-plugins=...,NodeRestriction,... + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." scored: true - id: 1.2.17 text: "Ensure that the --secure-port argument is not set to 0 (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'" tests: bin_op: or test_items: @@ -602,14 +606,15 @@ groups: - flag: "--secure-port" set: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and either remove the --secure-port parameter or - set it to a different (non-zero) desired port. + By default, K3s sets the secure port to 6444. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "secure-port=" scored: true - id: 1.2.18 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -617,29 +622,28 @@ groups: op: eq value: false remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "profiling=true" scored: true - id: 1.2.19 - text: "Ensure that the --audit-log-path argument is set (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-path'" tests: test_items: - flag: "--audit-log-path" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-path parameter to a suitable path and + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and file where you would like audit logs to be written, for example, - --audit-log-path=/var/log/apiserver/audit.log - scored: true + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false - id: 1.2.20 - text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxage'" tests: test_items: - flag: "--audit-log-maxage" @@ -647,16 +651,15 @@ groups: op: gte value: 30 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxage parameter to 30 - or as an appropriate number of days, for example, - --audit-log-maxage=30 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false - id: 1.2.21 - text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxbackup'" tests: test_items: - flag: "--audit-log-maxbackup" @@ -664,16 +667,15 @@ groups: op: gte value: 10 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate - value. For example, - --audit-log-maxbackup=10 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. 
For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false - id: 1.2.22 - text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-log-maxsize'" tests: test_items: - flag: "--audit-log-maxsize" @@ -681,27 +683,33 @@ groups: op: gte value: 100 remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB. - For example, to set it as 100 MB, --audit-log-maxsize=100 - scored: true + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false - id: 1.2.23 - text: "Ensure that the --request-timeout argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'request-timeout'" tests: + bin_op: or test_items: - flag: "--request-timeout" + set: false + - flag: "--request-timeout" remediation: | - Edit the API server pod specification file $apiserverconf - and set the below parameter as appropriate and if needed. - For example, --request-timeout=300s - scored: true + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false - id: 1.2.24 text: "Ensure that the --service-account-lookup argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" tests: bin_op: or test_items: @@ -712,30 +720,36 @@ groups: op: eq value: true remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the below parameter. - --service-account-lookup=true - Alternatively, you can delete the --service-account-lookup parameter from this file so + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so that the default takes effect. 
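For reference, the audit-log remediations in checks 1.2.19 through 1.2.22 above compose into a single stanza of the K3s config file. A minimal sketch of /etc/rancher/k3s/config.yaml, reusing the example path and limits from those checks (the values are illustrations to tune, not K3s defaults):

  kube-apiserver-arg:
    - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
    - "audit-log-maxage=30"
    - "audit-log-maxbackup=10"
    - "audit-log-maxsize=100"

After editing the file, restart the k3s service (for example, systemctl restart k3s.service) so the new flags are applied.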
scored: true - id: 1.2.25 text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep" - type: "skip" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'service-account-key-file'" tests: test_items: - flag: "--service-account-key-file" remediation: | - Edit the API server pod specification file $apiserverconf - on the control plane node and set the --service-account-key-file parameter - to the public key file for service accounts. For example, - --service-account-key-file= + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" scored: true - id: 1.2.26 text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -m1 'Running kube-apiserver'" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi tests: bin_op: and test_items: @@ -744,16 +758,17 @@ groups: - flag: "--etcd-keyfile" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate and key file parameters. - --etcd-certfile= - --etcd-keyfile= + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" scored: true - id: 1.2.27 text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" tests: bin_op: and test_items: @@ -762,57 +777,61 @@ groups: - flag: "--tls-private-key-file" set: true remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the TLS certificate and private key file parameters. - --tls-cert-file= - --tls-private-key-file= + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-apiserver-arg: + - "tls-cert-file=" + - "tls-private-key-file=" scored: true - id: 1.2.28 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'" tests: test_items: - flag: "--client-ca-file" remediation: | - Follow the Kubernetes documentation and set up the TLS connection on the apiserver. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the client certificate authority file. - --client-ca-file= + By default, K3s automatically provides the client certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "client-ca-file=" scored: true - id: 1.2.29 text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'" tests: test_items: - flag: "--etcd-cafile" remediation: | - Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the etcd certificate authority file parameter. - --etcd-cafile= + By default, K3s automatically provides the etcd certificate authority file. + It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-cafile=" scored: true - id: 1.2.30 - text: "Ensure that the --encryption-provider-config argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" + text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'" tests: test_items: - flag: "--encryption-provider-config" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - Then, edit the API server pod specification file $apiserverconf - on the control plane node and set the --encryption-provider-config parameter to the path of that file. - For example, --encryption-provider-config= + K3s can be configured to use encryption providers to encrypt secrets at rest. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json. 
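To make the 1.2.30 remediation concrete: unlike the kube-apiserver-arg entries above, encryption at rest is a top-level K3s option. A minimal sketch of /etc/rancher/k3s/config.yaml:

  secrets-encryption: true

After a restart, secrets encryption is managed with the k3s secrets-encrypt command line tool mentioned above; assuming your K3s release ships the subcommand, k3s secrets-encrypt status reports the active provider, and the generated provider file can be inspected at /var/lib/rancher/k3s/server/cred/encryption-config.json.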
scored: false - id: 1.2.31 - text: "Ensure that encryption providers are appropriately configured (Automated)" + text: "Ensure that encryption providers are appropriately configured (Manual)" audit: | - ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') - if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi + ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%') + if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi tests: test_items: - flag: "provider" @@ -820,13 +839,16 @@ groups: op: valid_elements value: "aescbc,kms,secretbox" remediation: | - Follow the Kubernetes documentation and configure a EncryptionConfig file. - In this file, choose aescbc, kms or secretbox as the encryption provider. + K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider. + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter. + secrets-encryption: true + Secrets encryption can then be managed with the k3s secrets-encrypt command line tool. + If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json scored: false - id: 1.2.32 text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'" tests: test_items: - flag: "--tls-cipher-suites" @@ -834,36 +856,32 @@ groups: op: valid_elements value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384" remediation: | - Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml - on the control plane node and set the below parameter. 
- --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA, - TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384 - scored: false + By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments. + If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements. + If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following: + kube-apiserver-arg: + - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + scored: true - id: 1.3 text: "Controller Manager" checks: - id: 1.3.1 - text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" + text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'" tests: test_items: - flag: "--terminated-pod-gc-threshold" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold, - for example, --terminated-pod-gc-threshold=10 + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node + and set the --terminated-pod-gc-threshold to an appropriate threshold, + kube-controller-manager-arg: + - "terminated-pod-gc-threshold=10" scored: false - id: 1.3.2 text: "Ensure that the --profiling argument is set to false (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'" tests: test_items: - flag: "--profiling" @@ -871,14 +889,15 @@ groups: op: eq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the below parameter. - --profiling=false + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+ kube-controller-manager-arg: + - "profiling=true" scored: true - id: 1.3.3 text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'" tests: test_items: - flag: "--use-service-account-credentials" @@ -886,40 +905,43 @@ groups: op: noteq value: false remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node to set the below parameter. - --use-service-account-credentials=true + By default, K3s sets the --use-service-account-credentials argument to true. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "use-service-account-credentials=false" scored: true - id: 1.3.4 text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'" tests: test_items: - flag: "--service-account-private-key-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --service-account-private-key-file parameter - to the private key file for service accounts. - --service-account-private-key-file= + By default, K3s automatically provides the service account private key file. + It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-controller-manager-arg: + - "service-account-private-key-file=" scored: true - id: 1.3.5 text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'" tests: test_items: - flag: "--root-ca-file" remediation: | - Edit the Controller Manager pod specification file $controllermanagerconf - on the control plane node and set the --root-ca-file parameter to the certificate bundle file`. - --root-ca-file= + By default, K3s automatically provides the root CA file. + It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+          kube-controller-manager-arg:
+            - "root-ca-file="
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
-        type: "skip"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -931,14 +953,16 @@
            - flag: "--feature-gates"
              set: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
-          --feature-gates=RotateKubeletServerCertificate=true
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "feature-gates=RotateKubeletServerCertificate"
        scored: true

      - id: 1.3.7
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
-        audit: "/bin/ps -ef | grep $controllermanagerbin | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'bind-address'"
        tests:
          bin_op: or
          test_items:
@@ -950,8 +974,10 @@
            - flag: "--bind-address"
              set: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and ensure the correct value for the --bind-address parameter
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "bind-address="
        scored: true

  - id: 1.4
@@ -959,7 +985,7 @@ checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1"
        tests:
          test_items:
            - flag: "--profiling"
@@ -968,14 +994,15 @@
              value: false
              set: true
        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf file
-          on the control plane node and set the below parameter.
-          --profiling=false
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "profiling=true"
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
        tests:
          bin_op: or
          test_items:
@@ -987,6 +1014,8 @@
            - flag: "--bind-address"
              set: false
        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          on the control plane node and ensure the correct value for the --bind-address parameter
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "bind-address="
        scored: true
diff --git a/cfg/k3s-cis-1.24/node.yaml b/cfg/k3s-cis-1.24/node.yaml
index 82ddff0fb..a9f1e0386 100644
--- a/cfg/k3s-cis-1.24/node.yaml
+++ b/cfg/k3s-cis-1.24/node.yaml
@@ -19,8 +19,8 @@ groups:
            op: bitmask
            value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the each worker node.
-          For example, chmod 600 $kubeletsvc
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
        scored: true

      - id: 4.1.2
@@ -31,14 +31,13 @@ test_items:
        - flag: root:root
        remediation: |
-          Run the below command (based on the file location on your system) on the each worker node.
-          For example,
-          chown root:root $kubeletsvc
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
        scored: true

      - id: 4.1.3
-        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
-        audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
@@ -51,11 +50,11 @@
          Run the below command (based on the file location on your system) on the each worker node.
          For example,
          chmod 600 $proxykubeconfig
-        scored: false
+        scored: true

      - id: 4.1.4
-        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
-        audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
@@ -63,18 +62,17 @@
        remediation: |
          Run the below command (based on the file location on your system) on the each worker node.
          For example,
          chown root:root $proxykubeconfig
-        scored: false
+        scored: true

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
-        audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubelet.kubeconfig '
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
        tests:
          test_items:
-            - flag: "600"
+            - flag: "permissions"
              compare:
-                op: eq
+                op: bitmask
                value: "600"
-              set: true
        remediation: |
          Run the below command (based on the file location on your system) on the each worker node.
For example, @@ -83,7 +81,7 @@ groups: - id: 4.1.6 text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)" - audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig' + audit: 'stat -c %U:%G $kubeletkubeconfig' tests: test_items: - flag: "root:root" @@ -98,8 +96,8 @@ groups: scored: true - id: 4.1.7 - text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)" - audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt" + text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)" + audit: "stat -c permissions=%a $kubeletcafile" tests: test_items: - flag: "permissions" @@ -109,22 +107,25 @@ groups: set: true remediation: | Run the following command to modify the file permissions of the - --client-ca-file chmod 600 - scored: false + --client-ca-file chmod 600 $kubeletcafile + scored: true - id: 4.1.8 - text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)" - audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt" + text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $kubeletcafile" tests: test_items: - flag: root:root + compare: + op: eq + value: root:root remediation: | Run the following command to modify the ownership of the --client-ca-file. - chown root:root - scored: false + chown root:root $kubeletcafile + scored: true - id: 4.1.9 - text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Manual)" + text: "If the kubelet config.yaml configuration file is being used validate permissions set to 600 or more restrictive (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' ' type: "skip" tests: @@ -134,20 +135,20 @@ groups: op: bitmask value: "600" remediation: | - Run the following command (using the config file location identified in the Audit step) - chmod 600 $kubeletconf + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. scored: true - id: 4.1.10 - text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Manual)" + text: "If the kubelet config.yaml configuration file is being used validate file ownership is set to root:root (Automated)" audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' ' type: "skip" tests: test_items: - flag: root:root remediation: | - Run the following command (using the config file location identified in the Audit step) - chown root:root $kubeletconf + Not Applicable. + The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime. 
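As a worked example of the 4.1.x audit-and-remediate pattern above, with $proxykubeconfig resolved to the default path this benchmark configures (/var/lib/rancher/k3s/agent/kubeproxy.kubeconfig):

  /bin/sh -c 'if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'
  # a compliant node prints: permissions=600
  chmod 600 /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig    # remediation if the mode is more permissive
  chown root:root /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig

The test -e guard matters because the proxy kubeconfig only exists where kube-proxy runs; the bitmask comparison then accepts 600 or anything more restrictive.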
scored: true - id: 4.2 @@ -155,7 +156,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -165,20 +166,20 @@ groups: value: false set: true remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' tests: test_items: - flag: --authorization-mode @@ -188,39 +189,33 @@ groups: value: AlwaysAllow set: true remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. 
For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file path: '{.authentication.x509.clientCAFile}' set: true remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt scored: true - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: bin_op: or test_items: @@ -233,19 +228,20 @@ groups: path: '{.readOnlyPort}' set: false remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service - scored: false + systemctl restart k3s.service + scored: true - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -258,21 +254,17 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. 
- If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.6 text: "Ensure that the --protect-kernel-defaults argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'protect-kernel-defaults'" - type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --protect-kernel-defaults @@ -282,20 +274,16 @@ groups: value: true set: true remediation: | - If using a Kubelet config file, edit the file to set `protectKernelDefaults` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --protect-kernel-defaults=true - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + protect-kernel-defaults: true + If using the command line, run K3s with --protect-kernel-defaults=true. + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.7 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" - type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --make-iptables-util-chains @@ -309,39 +297,31 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.8 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. 
- audit: "/bin/ps -fC $kubeletbin " + text: "Ensure that the --hostname-override argument is not set (Automated)" type: "skip" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --hostname-override set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + Not Applicable. + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true - id: 4.2.9 text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/cat $kubeletconf" tests: test_items: @@ -351,18 +331,18 @@ groups: op: eq value: 0 remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s sets the event-qps to 0. Should you wish to change this, + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "event-qps=" + If using the command line, run K3s with --kubelet-arg="event-qps=". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.10 - text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1" + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --tls-cert-file @@ -370,23 +350,19 @@ groups: - flag: --tls-private-key-file path: '/var/lib/rancher/k3s/agent/serving-kubelet.key' remediation: | - If using a Kubelet config file, edit the file to set `tlsCertFile` to the location - of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile` - to the location of the corresponding private key file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameters in KUBELET_CERTIFICATE_ARGS variable. - --tls-cert-file= - --tls-private-key-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service - scored: false + By default, K3s automatically provides the TLS certificate and private key for the Kubelet. + They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. 
+          kubelet-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true

      - id: 4.2.11
        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
            - flag: --rotate-certificates
@@ -399,21 +375,16 @@
              set: false
          bin_op: or
        remediation: |
-          If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or
-          remove it altogether to use the default value.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
-          variable.
-          Based on your system, restart the kubelet service. For example,
-          systemctl daemon-reload
-          systemctl restart kubelet.service
+          By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
        scored: true

      - id: 4.2.12
-        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
-        audit_config: "/bin/cat $kubeletconf"
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -426,17 +397,17 @@
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-          --feature-gates=RotateKubeletServerCertificate=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: false
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gates=RotateKubeletServerCertificate parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="feature-gates=RotateKubeletServerCertificate".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true

      - id: 4.2.13
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          test_items:
@@ -446,14 +417,11 @@
              op: valid_elements
              value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
        remediation: |
-          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
-          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          If using the K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tls-cipher-suites` to
+          kubelet-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
          or to a subset of these values.
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
+          If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites="
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
        scored: false
diff --git a/cfg/k3s-cis-1.24/policies.yaml b/cfg/k3s-cis-1.24/policies.yaml
index 33890fd57..4a4f9887b 100644
--- a/cfg/k3s-cis-1.24/policies.yaml
+++ b/cfg/k3s-cis-1.24/policies.yaml
@@ -152,8 +152,8 @@ groups:
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
-          Review the use of capabilites in applications running on your cluster. Where a namespace
-          contains applicaions which do not require any Linux capabities to operate consider adding
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
scored: false diff --git a/cfg/k3s-cis-1.7/config.yaml b/cfg/k3s-cis-1.7/config.yaml index e9574b035..ed8124f08 100644 --- a/cfg/k3s-cis-1.7/config.yaml +++ b/cfg/k3s-cis-1.7/config.yaml @@ -23,32 +23,42 @@ master: scheduler: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig controllermanager: bins: - containerd + kubeconfig: + - /var/lib/rancher/k3s/server/cred/controller.kubeconfig etcd: bins: - containerd - datadirs: - - /var/lib/rancher/k3s/server/db/etcd - node: - components: - - kubelet - - proxy - - kubelet: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig - defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt - - proxy: - bins: - - containerd - defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig - - policies: - components: - - policies + +etcd: + components: + - etcd + + etcd: + confs: /var/lib/rancher/k3s/server/db/etcd/config + +node: + components: + - kubelet + - proxy + + kubelet: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig + defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt + + proxy: + bins: + - containerd + defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig + +policies: + components: + - policies diff --git a/cfg/k3s-cis-1.7/controlplane.yaml b/cfg/k3s-cis-1.7/controlplane.yaml index fa63febda..b23a04a15 100644 --- a/cfg/k3s-cis-1.7/controlplane.yaml +++ b/cfg/k3s-cis-1.7/controlplane.yaml @@ -35,7 +35,7 @@ groups: checks: - id: 3.2.1 text: "Ensure that a minimal audit policy is created (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'" tests: test_items: - flag: "--audit-policy-file" diff --git a/cfg/k3s-cis-1.7/etcd.yaml b/cfg/k3s-cis-1.7/etcd.yaml index d29818148..004812013 100644 --- a/cfg/k3s-cis-1.7/etcd.yaml +++ b/cfg/k3s-cis-1.7/etcd.yaml @@ -10,128 +10,135 @@ groups: checks: - id: 2.1 text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.client-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt" + - path: "{.client-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key" remediation: | - Follow the etcd service documentation and configure TLS encryption. - Then, edit the etcd pod specification file /etc/kubernetes/manifests/etcd.yaml - on the master node and set the below parameters. - --cert-file= - --key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom cert and key files. 
+ scored: false - id: 2.2 text: "Ensure that the --client-cert-auth argument is set to true (Automated)" - audit: "grep -A 4 'client-transport-security' $etcdconf | grep 'client-cert-auth'" + audit_config: "cat $etcdconf" tests: - bin_op: or test_items: - - flag: "--client-cert-auth" - set: true - - flag: "client-cert-auth" + - path: "{.client-transport-security.client-cert-auth}" compare: op: eq value: true - set: true remediation: | - Edit the etcd pod specification file $etcdconf on the master - node and set the below parameter. - --client-cert-auth="true" - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to disable client certificate authentication. + scored: false - id: 2.3 text: "Ensure that the --auto-tls argument is not set to true (Automated)" - audit: "if grep -q '^auto-tls' $etcdconf;then grep '^auto-tls' $etcdconf;else echo 'notset';fi" + audit_config: "cat $etcdconf" tests: bin_op: or test_items: - - flag: "--auto-tls" - set: false - - flag: "--auto-tls" + - path: "{.client-transport-security.auto-tls}" compare: op: eq value: false + - path: "{.client-transport-security.auto-tls}" + set: false remediation: | - Edit the etcd pod specification file $etcdconf on the master + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s does not set the --auto-tls parameter. + If this check fails, edit the etcd pod specification file $etcdconf on the master node and either remove the --auto-tls parameter or set it to false. - --auto-tls=false - scored: true + client-transport-security: + auto-tls: false + scored: false - id: 2.4 text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)" - audit: "grep -A 4 'peer-transport-security' $etcdconf | grep -E 'cert-file|key-file'" + audit_config: "cat $etcdconf" tests: bin_op: and test_items: - - flag: "cert-file" - set: true - - flag: "key-file" - set: true + - path: "{.peer-transport-security.cert-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt" + - path: "{.peer-transport-security.key-file}" + compare: + op: eq + value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key" remediation: | - Follow the etcd service documentation and configure peer TLS encryption as appropriate - for your etcd cluster. - Then, edit the etcd pod specification file $etcdconf on the - master node and set the below parameters. - --peer-client-file= - --peer-key-file= - scored: true + If running on with sqlite or a external DB, etcd checks are Not Applicable. + When running with embedded-etcd, K3s generates peer cert and key files for etcd. + These are located in /var/lib/rancher/k3s/server/tls/etcd/. + If this check fails, ensure that the configuration file $etcdconf + has not been modified to use custom peer cert and key files. 
+        scored: false

      - id: 2.5
        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
-        audit: "grep -A 4 'peer-transport-security' $etcdconf | grep 'client-cert-auth'"
+        audit_config: "cat $etcdconf"
        tests:
-          bin_op: or
          test_items:
-            - flag: "--client-cert-auth"
-              set: true
-            - flag: "client-cert-auth"
+            - path: "{.peer-transport-security.client-cert-auth}"
              compare:
                op: eq
                value: true
-              set: true
        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
-          node and set the below parameter.
-          --peer-client-cert-auth=true
-        scored: true
+          If running with SQLite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable peer client certificate authentication.
+        scored: false

      - id: 2.6
        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
-        audit: "if grep -q '^peer-auto-tls' $etcdconf;then grep '^peer-auto-tls' $etcdconf;else echo 'notset';fi"
+        audit_config: "cat $etcdconf"
        tests:
          bin_op: or
          test_items:
-            - flag: "--peer-auto-tls"
-              set: false
-            - flag: "--peer-auto-tls"
+            - path: "{.peer-transport-security.auto-tls}"
              compare:
                op: eq
                value: false
-              set: true
+            - path: "{.peer-transport-security.auto-tls}"
+              set: false
        remediation: |
-          Edit the etcd pod specification file $etcdconf on the master
+          If running with SQLite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
+          If this check fails, edit the etcd pod specification file $etcdconf on the master
          node and either remove the --peer-auto-tls parameter or set it to false.
-          --peer-auto-tls=false
-        scored: true
+            peer-transport-security:
+              auto-tls: false
+        scored: false

      - id: 2.7
        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
-        audit: "if grep -q 'trusted-ca-file' $etcdconf;then grep 'trusted-ca-file' $etcdconf;else echo 'notset';fi"
+        audit_config: "cat $etcdconf"
        tests:
          test_items:
-            - flag: "trusted-ca-file"
-              set: true
+            - path: "{.peer-transport-security.trusted-ca-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
        remediation: |
-          [Manual test]
-          Follow the etcd documentation and create a dedicated certificate authority setup for the
-          etcd service.
-          Then, edit the etcd pod specification file $etcdconf on the
-          master node and set the below parameter.
-          --trusted-ca-file=
+          If running with SQLite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
+          This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use a shared certificate authority.
        scored: false
diff --git a/cfg/k3s-cis-1.7/master.yaml b/cfg/k3s-cis-1.7/master.yaml
index 109b8d84e..9e2ef0880 100644
--- a/cfg/k3s-cis-1.7/master.yaml
+++ b/cfg/k3s-cis-1.7/master.yaml
@@ -19,10 +19,8 @@ groups:
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the
-          control plane node.
-          For example, chmod 600 $apiserverconf
          Not Applicable.
+          By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
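[Reviewer note, not part of the patch: this and the following "Not Applicable" remediations in 1.1.1 through 1.1.8 all follow from the same K3s design: there are no static pod manifests, so component flags are supplied through the K3s config file rather than a pod specification. A hypothetical /etc/rancher/k3s/config.yaml illustrating the pattern used throughout the remediations below:
    kube-apiserver-arg:
      - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
    kube-controller-manager-arg:
      - "terminated-pod-gc-threshold=10"
Both values are taken from later checks in this file (1.2.18 and 1.3.1) and are examples, not defaults.]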
        scored: true

      - id: 1.1.2
@@ -33,9 +31,8 @@
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chown root:root $apiserverconf
          Not Applicable.
+          By default, K3s embeds the api server within the k3s process. There is no API server pod specification file.
        scored: true

      - id: 1.1.3
@@ -49,9 +46,8 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chmod 600 $controllermanagerconf
          Not Applicable.
+          By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
        scored: true

      - id: 1.1.4
@@ -62,9 +58,8 @@
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chown root:root $controllermanagerconf
          Not Applicable.
+          By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file.
        scored: true

      - id: 1.1.5
@@ -78,9 +73,8 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chmod 600 $schedulerconf
          Not Applicable.
+          By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
        scored: true

      - id: 1.1.6
@@ -91,9 +85,8 @@
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chown root:root $schedulerconf
          Not Applicable.
+          By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file.
        scored: true

      - id: 1.1.7
@@ -108,10 +101,8 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example,
-          chmod 600 $etcdconf
          Not Applicable.
+          By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
        scored: true

      - id: 1.1.8
@@ -123,18 +114,13 @@
        tests:
          test_items:
            - flag: "root:root"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example,
-          chown root:root $etcdconf
          Not Applicable.
+          By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file.
        scored: true

      - id: 1.1.9
-        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)"
-        type: "skip"
-        audit: |
-          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c permissions=%a
-          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
+        text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Manual)"
+        audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a
        use_multiple_values: true
        tests:
          test_items:
@@ -143,17 +129,15 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chmod 600
-          Not Applicable.
+          By default, K3s sets the CNI file permissions to 644.
+          Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored.
+          If you modify your CNI configuration, ensure that the permissions are set to 600.
+          For example, chmod 600 /var/lib/cni/networks/
        scored: false

      - id: 1.1.10
-        text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
-        type: skip
-        audit: |
-          ps -ef | grep $kubeletbin | grep -- --cni-conf-dir | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
-          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)"
+        audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
        use_multiple_values: true
        tests:
          test_items:
@@ -161,25 +145,28 @@
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
          For example,
-          chown root:root
-          Not Applicable.
-        scored: false
+          chown root:root /var/lib/cni/networks/
+        scored: true

      - id: 1.1.11
        text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)"
-        audit: "stat -c %a $etcddatadir"
+        audit: |
+          if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
+            stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd
+          else
+            echo "permissions=700"
+          fi
        tests:
          test_items:
-            - flag: "700"
+            - flag: "permissions"
              compare:
-                op: eq
+                op: bitmask
                value: "700"
-              set: true
        remediation: |
          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
          from the command 'ps -ef | grep etcd'.
          Run the below command (based on the etcd data directory found above).
          For example,
-          chmod 700 /var/lib/etcd
+          chmod 700 $etcddatadir
        scored: true

      - id: 1.1.12
@@ -190,17 +177,14 @@
        tests:
          test_items:
            - flag: "etcd:etcd"
        remediation: |
-          On the etcd server node, get the etcd data directory, passed as an argument --data-dir,
-          from the command 'ps -ef | grep etcd'.
-          Run the below command (based on the etcd data directory found above).
-          For example, chown etcd:etcd /var/lib/etcd
          Not Applicable.
+          For K3s, etcd is embedded within the k3s process. There is no separate etcd process.
+          Therefore the etcd data directory ownership is managed by the k3s process and should be root:root.
        scored: true

      - id: 1.1.13
        text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)"
-        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig'"
-        type: "skip"
+        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
@@ -224,12 +208,12 @@
              set: true
        remediation: |
          Run the below command (based on the file location on your system) on the control plane node.
-          For example, chown root:root /etc/kubernetes/admin.conf
+          For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig
        scored: true

      - id: 1.1.15
        text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)"
-        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
@@ -244,7 +228,7 @@

      - id: 1.1.16
        text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)"
-        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig; fi'"
+        audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "root:root"
@@ -256,7 +240,7 @@

      - id: 1.1.17
        text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)"
-        audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/controller.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/controller.kubeconfig; fi'"
+        audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'"
        tests:
          test_items:
            - flag: "permissions"
@@ -271,7 +255,7 @@

      - id: 1.1.18
        text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)"
-        audit: "stat -c %U:%G /var/lib/rancher/k3s/server/cred/controller.kubeconfig"
+        audit: "stat -c %U:%G $controllermanagerkubeconfig"
        tests:
          test_items:
            - flag: "root:root"
@@ -300,7 +284,7 @@

      - id: 1.1.20
        text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)"
-        audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.crt"
+        audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'"
        use_multiple_values: true
        tests:
          test_items:
@@ -309,14 +293,14 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
+          Run the below command (based on the file location on your system) on the master node.
          For example,
-          chmod -R 600 /etc/kubernetes/pki/*.crt
+          chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt
        scored: false

      - id: 1.1.21
-        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
-        audit: "stat -c %n\ %a /var/lib/rancher/k3s/server/tls/*.key"
+        text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)"
+        audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'"
        use_multiple_values: true
        tests:
          test_items:
@@ -325,17 +309,17 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the control plane node.
+          Run the below command (based on the file location on your system) on the master node.
          For example,
-          chmod -R 600 /etc/kubernetes/pki/*.key
-        scored: false
+          chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key
+        scored: true

  - id: 1.2
    text: "API Server"
    checks:
      - id: 1.2.1
-        text: "Ensure that the --anonymous-auth argument is set to false (Manual)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'"
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'"
        tests:
          test_items:
            - flag: "--anonymous-auth"
@@ -343,27 +327,29 @@
              op: eq
              value: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the below parameter.
-          --anonymous-auth=false
-        scored: false
+          By default, K3s sets the --anonymous-auth argument to false. If it is set to true,
+          edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+          kube-apiserver-arg:
+            - "anonymous-auth=true"
+        scored: true

      - id: 1.2.2
        text: "Ensure that the --token-auth-file parameter is not set (Automated)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--token-auth-file"
              set: false
        remediation: |
-          Follow the documentation and configure alternate mechanisms for authentication. Then,
-          edit the API server pod specification file $apiserverconf
-          on the control plane node and remove the --token-auth-file= parameter.
+          Follow the documentation and configure alternate mechanisms for authentication.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below.
+          kube-apiserver-arg:
+            - "token-auth-file="
        scored: true

      - id: 1.2.3
        text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -375,47 +361,46 @@
            - flag: "--enable-admission-plugins"
              set: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and remove the `DenyServiceExternalIPs`
-          from enabled admission plugins.
+          By default, K3s does not set DenyServiceExternalIPs.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=DenyServiceExternalIPs"
        scored: true

      - id: 1.2.4
        text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
        tests:
          bin_op: and
          test_items:
            - flag: "--kubelet-client-certificate"
            - flag: "--kubelet-client-key"
        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the
-          apiserver and kubelets. Then, edit API server pod specification file
-          $apiserverconf on the control plane node and set the
-          kubelet client certificate and key parameters as below.
-          --kubelet-client-certificate=
-          --kubelet-client-key=
+          By default, K3s automatically provides the kubelet client certificate and key.
+          They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key
+          If for some reason you need to provide your own certificate and key, you can set the
+          below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+          kube-apiserver-arg:
+            - "kubelet-client-certificate="
+            - "kubelet-client-key="
        scored: true

      - id: 1.2.5
        text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)"
-        type: "skip"
-        audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'"
        tests:
          test_items:
            - flag: "--kubelet-certificate-authority"
        remediation: |
-          Follow the Kubernetes documentation and setup the TLS connection between
-          the apiserver and kubelets. Then, edit the API server pod specification file
-          $apiserverconf on the control plane node and set the
-          --kubelet-certificate-authority parameter to the path to the cert file for the certificate authority.
-          --kubelet-certificate-authority=
-          Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
+          By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "kubelet-certificate-authority="
        scored: true

      - id: 1.2.6
        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
        tests:
          test_items:
            - flag: "--authorization-mode"
@@ -423,15 +408,15 @@
              op: nothave
              value: "AlwaysAllow"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --authorization-mode parameter to values other than AlwaysAllow.
-          One such example could be as below.
-          --authorization-mode=RBAC
+          By default, K3s does not set the --authorization-mode to AlwaysAllow.
+          If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "authorization-mode=AlwaysAllow"
        scored: true

      - id: 1.2.7
        text: "Ensure that the --authorization-mode argument includes Node (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
        tests:
          test_items:
            - flag: "--authorization-mode"
@@ -439,14 +424,14 @@
              op: has
              value: "Node"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --authorization-mode parameter to a value that includes Node.
-          --authorization-mode=Node,RBAC
+          By default, K3s sets the --authorization-mode to Node and RBAC.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml,
+          ensure that you are not overriding authorization-mode.
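[Reviewer note, not part of the patch: an override that would make 1.2.7 and 1.2.8 fail, and that these remediations tell you not to add, would look like this in /etc/rancher/k3s/config.yaml (hypothetical value; the K3s default is Node,RBAC per the remediation text):
    kube-apiserver-arg:
      - "authorization-mode=RBAC"
Dropping Node from the list breaks 1.2.7 even though RBAC alone still satisfies 1.2.8.]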
        scored: true

      - id: 1.2.8
        text: "Ensure that the --authorization-mode argument includes RBAC (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'"
        tests:
          test_items:
            - flag: "--authorization-mode"
@@ -454,14 +439,14 @@
              op: has
              value: "RBAC"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --authorization-mode parameter to a value that includes RBAC,
-          for example `--authorization-mode=Node,RBAC`.
+          By default, K3s sets the --authorization-mode to Node and RBAC.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml,
+          ensure that you are not overriding authorization-mode.
        scored: true

      - id: 1.2.9
        text: "Ensure that the admission control plugin EventRateLimit is set (Manual)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
@@ -470,15 +455,15 @@
              value: "EventRateLimit"
        remediation: |
          Follow the Kubernetes documentation and set the desired limits in a configuration file.
-          Then, edit the API server pod specification file $apiserverconf
-          and set the below parameters.
-          --enable-admission-plugins=...,EventRateLimit,...
-          --admission-control-config-file=
+          Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,EventRateLimit,..."
+            - "admission-control-config-file="
        scored: false

      - id: 1.2.10
        text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
        tests:
          bin_op: or
          test_items:
@@ -489,9 +474,10 @@
            - flag: "--enable-admission-plugins"
              set: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and either remove the --enable-admission-plugins parameter, or set it to a
-          value that does not include AlwaysAdmit.
+          By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit.
+          If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=AlwaysAdmit"
        scored: true

      - id: 1.2.11
@@ -504,10 +490,13 @@
              op: has
              value: "AlwaysPullImages"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --enable-admission-plugins parameter to include
-          AlwaysPullImages.
-          --enable-admission-plugins=...,AlwaysPullImages,...
+          Permissive, per CIS guidelines,
+          "This setting could impact offline or isolated clusters, which have images pre-loaded and
+          do not have access to a registry to pull in-use images. This setting is not appropriate for
+          clusters which use this configuration."
+          Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,AlwaysPullImages,..."
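[Reviewer note, not part of the patch: a concrete, hypothetical config.yaml fragment satisfying 1.2.11, per the remediation above; note the quoted CIS caveat about offline or isolated clusters before enabling it:
    kube-apiserver-arg:
      - "enable-admission-plugins=NodeRestriction,AlwaysPullImages"
NodeRestriction is included because check 1.2.15 below expects it to remain enabled.]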
        scored: false

      - id: 1.2.12
@@ -526,16 +515,13 @@
              op: has
              value: "PodSecurityPolicy"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --enable-admission-plugins parameter to include
-          SecurityContextDeny, unless PodSecurityPolicy is already in place.
-          --enable-admission-plugins=...,SecurityContextDeny,...
-          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
+          Not Applicable.
+          Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail.
        scored: false

      - id: 1.2.13
        text: "Ensure that the admission control plugin ServiceAccount is set (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -546,15 +532,16 @@
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
+          By default, K3s does not set the --disable-admission-plugins to anything.
          Follow the documentation and create ServiceAccount objects as per your environment.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and ensure that the --disable-admission-plugins parameter is set to a
-          value that does not include ServiceAccount.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=ServiceAccount"
        scored: true

      - id: 1.2.14
        text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -565,14 +552,15 @@
            - flag: "--disable-admission-plugins"
              set: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --disable-admission-plugins parameter to
-          ensure it does not include NamespaceLifecycle.
+          By default, K3s does not set the --disable-admission-plugins to anything.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "disable-admission-plugins=...,NamespaceLifecycle,..."
        scored: true

      - id: 1.2.15
        text: "Ensure that the admission control plugin NodeRestriction is set (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'"
        tests:
          test_items:
            - flag: "--enable-admission-plugins"
@@ -580,16 +568,16 @@
              op: has
              value: "NodeRestriction"
        remediation: |
-          Follow the Kubernetes documentation and configure NodeRestriction plug-in on kubelets.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --enable-admission-plugins parameter to a
-          value that includes NodeRestriction.
-          --enable-admission-plugins=...,NodeRestriction,...
+          By default, K3s sets the --enable-admission-plugins to NodeRestriction.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins.
+          If you are, include NodeRestriction in the list.
+          kube-apiserver-arg:
+            - "enable-admission-plugins=...,NodeRestriction,..."
        scored: true

      - id: 1.2.16
        text: "Ensure that the --secure-port argument is not set to 0 - NoteThis recommendation is obsolete and will be deleted per the consensus process (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'secure-port'"
        tests:
          bin_op: or
          test_items:
@@ -600,14 +588,15 @@
            - flag: "--secure-port"
              set: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and either remove the --secure-port parameter or
-          set it to a different (non-zero) desired port.
+          By default, K3s sets the secure port to 6444.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "secure-port="
        scored: true

      - id: 1.2.17
        text: "Ensure that the --profiling argument is set to false (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'"
        tests:
          test_items:
            - flag: "--profiling"
@@ -615,30 +604,28 @@
              op: eq
              value: false
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the below parameter.
-          --profiling=false
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "profiling=true"
        scored: true

      - id: 1.2.18
-        text: "Ensure that the --audit-log-path argument is set (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        text: "Ensure that the --audit-log-path argument is set (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--audit-log-path"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --audit-log-path parameter to a suitable path and
+          Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and
          file where you would like audit logs to be written, for example,
-          --audit-log-path=/var/log/apiserver/audit.log
-          Permissive.
-        scored: true
+          kube-apiserver-arg:
+            - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log"
+        scored: false

      - id: 1.2.19
-        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--audit-log-maxage"
@@ -646,17 +633,15 @@
              op: gte
              value: 30
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --audit-log-maxage parameter to 30
-          or as an appropriate number of days, for example,
-          --audit-log-maxage=30
-          Permissive.
-        scored: true
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+          set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example,
+          kube-apiserver-arg:
+            - "audit-log-maxage=30"
+        scored: false

      - id: 1.2.20
-        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--audit-log-maxbackup"
@@ -664,17 +649,15 @@
              op: gte
              value: 10
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --audit-log-maxbackup parameter to 10 or to an appropriate
-          value. For example,
-          --audit-log-maxbackup=10
-          Permissive.
-        scored: true
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+          set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example,
+          kube-apiserver-arg:
+            - "audit-log-maxbackup=10"
+        scored: false

      - id: 1.2.21
-        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--audit-log-maxsize"
@@ -682,29 +665,30 @@
              op: gte
              value: 100
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --audit-log-maxsize parameter to an appropriate size in MB.
-          For example, to set it as 100 MB, --audit-log-maxsize=100
-          Permissive.
-        scored: true
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and
+          set the audit-log-maxsize parameter to an appropriate size in MB. For example,
+          kube-apiserver-arg:
+            - "audit-log-maxsize=100"
+        scored: false

      - id: 1.2.22
        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--request-timeout"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          and set the below parameter as appropriate and if needed.
-          For example, --request-timeout=300s
-          Permissive.
+          Permissive, per CIS guidelines,
+          "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed".
+          Edit the K3s config file /etc/rancher/k3s/config.yaml
+          and set the below parameter if needed.
          For example,
+          kube-apiserver-arg:
+            - "request-timeout=300s"
        scored: false

      - id: 1.2.23
        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -715,30 +699,36 @@
              op: eq
              value: true
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the below parameter.
-          --service-account-lookup=true
-          Alternatively, you can delete the --service-account-lookup parameter from this file so
+          By default, K3s does not set the --service-account-lookup argument.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example,
+          kube-apiserver-arg:
+            - "service-account-lookup=true"
+          Alternatively, you can delete the service-account-lookup parameter from this file so
          that the default takes effect.
        scored: true

      - id: 1.2.24
        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -v grep"
-        type: "skip"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
        tests:
          test_items:
            - flag: "--service-account-key-file"
        remediation: |
-          Edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --service-account-key-file parameter
-          to the public key file for service accounts. For example,
-          --service-account-key-file=
+          K3s automatically generates and sets the service account key file.
+          It is located at /var/lib/rancher/k3s/server/tls/service.key.
+          If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "service-account-key-file="
        scored: true

      - id: 1.2.25
        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        audit: |
+          if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
+            journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1
+          else
+            echo "--etcd-certfile AND --etcd-keyfile"
+          fi
        tests:
          bin_op: and
          test_items:
@@ -747,16 +737,17 @@
            - flag: "--etcd-keyfile"
              set: true
        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the etcd certificate and key file parameters.
-          --etcd-certfile=
-          --etcd-keyfile=
+          K3s automatically generates and sets the etcd certificate and key files.
+          They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-certfile="
+            - "etcd-keyfile="
        scored: true

      - id: 1.2.26
        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
+        audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
        tests:
          bin_op: and
          test_items:
@@ -765,60 +756,61 @@
            - flag: "--tls-private-key-file"
              set: true
        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the TLS certificate and private key file parameters.
-          --tls-cert-file=
-          --tls-private-key-file=
+          By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
+          They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
        scored: true

      - id: 1.2.27
        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
        tests:
          test_items:
            - flag: "--client-ca-file"
        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection on the apiserver.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the client certificate authority file.
-          --client-ca-file=
+          By default, K3s automatically provides the client certificate authority file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "client-ca-file="
        scored: true

      - id: 1.2.28
        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
        tests:
          test_items:
            - flag: "--etcd-cafile"
        remediation: |
-          Follow the Kubernetes documentation and set up the TLS connection between the apiserver and etcd.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the etcd certificate authority file parameter.
-          --etcd-cafile=
+          By default, K3s automatically provides the etcd certificate authority file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-cafile="
        scored: true

      - id: 1.2.29
        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
-        type: "skip"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
        tests:
          test_items:
            - flag: "--encryption-provider-config"
        remediation: |
-          Follow the Kubernetes documentation and configure a EncryptionConfig file.
-          Then, edit the API server pod specification file $apiserverconf
-          on the control plane node and set the --encryption-provider-config parameter to the path of that file.
-          For example, --encryption-provider-config=
-          Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
+          K3s can be configured to use encryption providers to encrypt secrets at rest.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
        scored: false

      - id: 1.2.30
        text: "Ensure that encryption providers are appropriately configured (Manual)"
-        type: "skip"
        audit: |
-          ENCRYPTION_PROVIDER_CONFIG=$(ps -ef | grep $apiserverbin | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
-          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -A1 'providers:' $ENCRYPTION_PROVIDER_CONFIG | tail -n1 | grep -o "[A-Za-z]*" | sed 's/^/provider=/'; fi
+          ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
        tests:
          test_items:
            - flag: "provider"
@@ -826,14 +818,16 @@
              op: valid_elements
              value: "aescbc,kms,secretbox"
        remediation: |
-          Follow the Kubernetes documentation and configure a EncryptionConfig file.
-          In this file, choose aescbc, kms or secretbox as the encryption provider.
-          Permissive - Enabling encryption changes how data can be recovered as data is encrypted.
+          K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json
        scored: false

-      - id: 1.2.32
-        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Manual)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
+      - id: 1.2.31
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
        tests:
          test_items:
            - flag: "--tls-cipher-suites"
@@ -841,36 +835,32 @@
              op: valid_elements
              value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
        remediation: |
-          Edit the API server pod specification file /etc/kubernetes/manifests/kube-apiserver.yaml
-          on the control plane node and set the below parameter.
-          --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,
-          TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
-          TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
-          TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
-          TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
-          TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
-          TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,
-          TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384
-        scored: false
+          By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression, therefore ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+          If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+          If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
+          kube-apiserver-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+        scored: true

  - id: 1.3
    text: "Controller Manager"
    checks:
      - id: 1.3.1
        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
        tests:
          test_items:
            - flag: "--terminated-pod-gc-threshold"
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the --terminated-pod-gc-threshold to an appropriate threshold,
-          for example, --terminated-pod-gc-threshold=10
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
+          and set the --terminated-pod-gc-threshold to an appropriate threshold,
+          kube-controller-manager-arg:
+            - "terminated-pod-gc-threshold=10"
        scored: false

      - id: 1.3.2
        text: "Ensure that the --profiling argument is set to false (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
        tests:
          test_items:
            - flag: "--profiling"
@@ -878,14 +868,15 @@
              op: eq
              value: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the below parameter.
-          --profiling=false
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "profiling=true"
        scored: true

      - id: 1.3.3
        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
        tests:
          test_items:
            - flag: "--use-service-account-credentials"
@@ -893,40 +884,43 @@
              op: noteq
              value: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node to set the below parameter.
-          --use-service-account-credentials=true
+          By default, K3s sets the --use-service-account-credentials argument to true.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "use-service-account-credentials=false"
        scored: true

      - id: 1.3.4
        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
        tests:
          test_items:
            - flag: "--service-account-private-key-file"
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the --service-account-private-key-file parameter
-          to the private key file for service accounts.
-          --service-account-private-key-file=
+          By default, K3s automatically provides the service account private key file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "service-account-private-key-file="
        scored: true

      - id: 1.3.5
        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
        tests:
          test_items:
            - flag: "--root-ca-file"
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the --root-ca-file parameter to the certificate bundle file`.
-          --root-ca-file=
+          By default, K3s automatically provides the root CA file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "root-ca-file="
        scored: true

      - id: 1.3.6
        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
-        type: "skip"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'RotateKubeletServerCertificate'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
        tests:
          bin_op: or
          test_items:
@@ -938,10 +932,11 @@
            - flag: "--feature-gates"
              set: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
-          --feature-gates=RotateKubeletServerCertificate=true
-          Not Applicable.
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below.
+          kube-controller-manager-arg:
+            - "feature-gates=RotateKubeletServerCertificate=false"
        scored: true

      - id: 1.3.7
@@ -958,8 +953,10 @@
            - flag: "--bind-address"
              set: false
        remediation: |
-          Edit the Controller Manager pod specification file $controllermanagerconf
-          on the control plane node and ensure the correct value for the --bind-address parameter
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "bind-address="
        scored: true

  - id: 1.4
@@ -967,7 +964,7 @@
    checks:
      - id: 1.4.1
        text: "Ensure that the --profiling argument is set to false (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1"
        tests:
          test_items:
            - flag: "--profiling"
@@ -976,14 +973,15 @@
              value: false
              set: true
        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf file
-          on the control plane node and set the below parameter.
-          --profiling=false
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "profiling=true"
        scored: true

      - id: 1.4.2
        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
        tests:
          bin_op: or
          test_items:
@@ -995,6 +993,8 @@
            - flag: "--bind-address"
              set: false
        remediation: |
-          Edit the Scheduler pod specification file $schedulerconf
-          on the control plane node and ensure the correct value for the --bind-address parameter
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "bind-address="
        scored: true
diff --git a/cfg/k3s-cis-1.7/node.yaml b/cfg/k3s-cis-1.7/node.yaml
index 780bb4d8b..b873aea68 100644
--- a/cfg/k3s-cis-1.7/node.yaml
+++ b/cfg/k3s-cis-1.7/node.yaml
@@ -19,9 +19,8 @@ groups:
              op: bitmask
              value: "600"
        remediation: |
-          Run the below command (based on the file location on your system) on the each worker node.
-          For example, chmod 600 $kubeletsvc
-          Not Applicable
-          All configuration is passed in as arguments at container run time.
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
        scored: true

      - id: 4.1.2
@@ -32,16 +31,13 @@
        tests:
          test_items:
            - flag: root:root
        remediation: |
-          Run the below command (based on the file location on your system) on the each worker node.
-          For example,
-          chown root:root $kubeletsvc
-          Not Applicable.
-          All configuration is passed in as arguments at container run time.
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file, all configuration is passed in as arguments at runtime.
        scored: true

      - id: 4.1.3
-        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Manual)"
-        audit: 'stat -c %a /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig'
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
@@ -53,11 +51,11 @@ groups:
          Run the below command (based on the file location on your system) on the each worker node.
          For example, chmod 600 $proxykubeconfig
-        scored: false
+        scored: true

      - id: 4.1.4
-        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Manual)"
-        audit: '/bin/sh -c ''if test -e /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig; fi'' '
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
        tests:
          bin_op: or
          test_items:
@@ -65,7 +63,7 @@
        remediation: |
          Run the below command (based on the file location on your system) on the each worker node.
          For example, chown root:root $proxykubeconfig
-        scored: false
+        scored: true

      - id: 4.1.5
        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
@@ -84,7 +82,7 @@

      - id: 4.1.6
        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
-        audit: 'stat -c %U:%G /var/lib/rancher/k3s/agent/kubelet.kubeconfig'
+        audit: 'stat -c %U:%G $kubeletkubeconfig'
        tests:
          test_items:
            - flag: root:root
@@ -95,8 +93,8 @@
        scored: true

      - id: 4.1.7
-        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Manual)"
-        audit: "stat -c %a /var/lib/rancher/k3s/server/tls/server-ca.crt"
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a $kubeletcafile"
        tests:
          test_items:
            - flag: "permissions"
@@ -105,19 +103,22 @@
              value: "600"
        remediation: |
          Run the following command to modify the file permissions of the
-          --client-ca-file chmod 600
-        scored: false
+          --client-ca-file chmod 600 $kubeletcafile
+        scored: true

      - id: 4.1.8
-        text: "Ensure that the client certificate authorities file ownership is set to root:root (Manual)"
-        audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls/client-ca.crt"
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $kubeletcafile"
        tests:
          test_items:
            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
        remediation: |
          Run the following command to modify the ownership of the --client-ca-file.
-          chown root:root
-        scored: false
+          chown root:root $kubeletcafile
+        scored: true

      - id: 4.1.9
        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
@@ -130,8 +131,8 @@
              op: bitmask
              value: "600"
        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chmod 600 $kubeletconf
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
        scored: true

      - id: 4.1.10
@@ -142,10 +143,8 @@
        tests:
          test_items:
            - flag: root:root
        remediation: |
-          Run the following command (using the config file location identified in the Audit step)
-          chown root:root $kubeletconf
          Not Applicable.
-          All configuration is passed in as arguments at container run time.
+          The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
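[Reviewer note, not part of the patch: the same config-file pattern applies to the kubelet runtime checks in section 4.2 that follow; since there is no kubelet config file or service file, flags reach the embedded kubelet via kubelet-arg. A hypothetical /etc/rancher/k3s/config.yaml fragment, using a flag from check 4.2.5 below (the 5m value is an illustrative assumption, not a default):
    kubelet-arg:
      - "streaming-connection-idle-timeout=5m"
]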
scored: true - id: 4.2 @@ -153,7 +152,7 @@ groups: checks: - id: 4.2.1 text: "Ensure that the --anonymous-auth argument is set to false (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' ' tests: test_items: - flag: "--anonymous-auth" @@ -162,20 +161,20 @@ groups: op: eq value: false remediation: | - If using a Kubelet config file, edit the file to set `authentication: anonymous: enabled` to - `false`. - If using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - `--anonymous-auth=false` - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --anonymous-auth to false. If you have set this to a different value, you + should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "anonymous-auth=true" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="anonymous-auth=true" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.2 text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode" | grep -v grep; else echo "--authorization-mode=Webhook"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' ' audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -185,38 +184,32 @@ groups: op: nothave value: AlwaysAllow remediation: | - If using a Kubelet config file, edit the file to set `authorization.mode` to Webhook. If - using executable arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --authorization-mode=Webhook - Based on your system, restart the kubelet service. For example, + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "authorization-mode=AlwaysAllow" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="authorization-mode=AlwaysAllow" + Based on your system, restart the k3s service. 
For example, systemctl daemon-reload - systemctl restart kubelet.service + systemctl restart k3s.service scored: true - id: 4.2.3 text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)" - audit: '/bin/sh -c ''if test $(journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -D /var/log/journal -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file" | grep -v grep; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' + audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' ' tests: test_items: - flag: --client-ca-file path: '{.authentication.x509.clientCAFile}' remediation: | - If using a Kubelet config file, edit the file to set `authentication.x509.clientCAFile` to - the location of the client CA file. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_AUTHZ_ARGS variable. - --client-ca-file= - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + By default, K3s automatically provides the client ca certificate for the Kubelet. + It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt scored: true - id: 4.2.4 - text: "Verify that the --read-only-port argument is set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'read-only-port' " + text: "Verify that the --read-only-port argument is set to 0 (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: bin_op: or @@ -230,19 +223,20 @@ groups: path: '{.readOnlyPort}' set: false remediation: | - If using a Kubelet config file, edit the file to set `readOnlyPort` to 0. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --read-only-port=0 - Based on your system, restart the kubelet service. For example, + By default, K3s sets the --read-only-port to 0. If you have set this to a different value, you + should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below. + kubelet-arg: + - "read-only-port=XXXX" + If using the command line, edit the K3s service file and remove the below argument. + --kubelet-arg="read-only-port=XXXX" + Based on your system, restart the k3s service. For example, systemctl daemon-reload - systemctl restart kubelet.service - scored: false + systemctl restart k3s.service + scored: true - id: 4.2.5 text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'streaming-connection-idle-timeout'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --streaming-connection-idle-timeout @@ -255,21 +249,17 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `streamingConnectionIdleTimeout` to a - value other than 0. 
- If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable. - --streaming-connection-idle-timeout=5m - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value. + kubelet-arg: + - "streaming-connection-idle-timeout=5m" + If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: false - id: 4.2.6 text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)" - type: "skip" - audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1 | grep 'make-iptables-util-chains'" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" tests: test_items: - flag: --make-iptables-util-chains @@ -282,41 +272,31 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `makeIPTablesUtilChains` to `true`. - If using command line arguments, edit the kubelet service file - $kubeletsvc on each worker node and - remove the --make-iptables-util-chains argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example: - systemctl daemon-reload - systemctl restart kubelet.service - Permissive. + If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter. + kubelet-arg: + - "make-iptables-util-chains=true" + If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true". + Based on your system, restart the k3s service. For example, + systemctl restart k3s.service scored: true - id: 4.2.7 - text: "Ensure that the --hostname-override argument is not set (Manual)" - # This is one of those properties that can only be set as a command line argument. - # To check if the property is set as expected, we need to parse the kubelet command - # instead reading the Kubelet Configuration file. + text: "Ensure that the --hostname-override argument is not set (Automated)" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" type: "skip" - audit: "/bin/ps -fC $kubeletbin " tests: test_items: - flag: --hostname-override set: false remediation: | - Edit the kubelet service file $kubeletsvc - on each worker node and remove the --hostname-override argument from the - KUBELET_SYSTEM_PODS_ARGS variable. - Based on your system, restart the kubelet service. For example, - systemctl daemon-reload - systemctl restart kubelet.service Not Applicable. - scored: false + By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is to comply + with cloud providers that require this flag to ensure that hostname matches node names. + scored: true - id: 4.2.8 text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)" - audit: "/bin/ps -fC $kubeletbin" + audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1" audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' " tests: test_items: @@ -330,19 +310,18 @@ groups: set: false bin_op: or remediation: | - If using a Kubelet config file, edit the file to set `eventRecordQPS` to an appropriate level. 
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameter in KUBELET_SYSTEM_PODS_ARGS variable.
-          Based on your system, restart the kubelet service. For example,
-          systemctl daemon-reload
-          systemctl restart kubelet.service
+          By default, K3s sets the event-qps to 0. Should you wish to change this,
+          if using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+          kubelet-arg:
+            - "event-qps="
+          If using the command line, run K3s with --kubelet-arg="event-qps=".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
        scored: false

      - id: 4.2.9
-        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Manual)"
-        type: "skip"
-        audit: "journalctl -D /var/log/journal -u k3s | grep 'Running kubelet' | tail -n1"
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        tests:
          test_items:
            - flag: --tls-cert-file
@@ -350,23 +329,18 @@
            - flag: --tls-private-key-file
              path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
        remediation: |
-          If using a Kubelet config file, edit the file to set `tlsCertFile` to the location
-          of the certificate file to use to identify this Kubelet, and `tlsPrivateKeyFile`
-          to the location of the corresponding private key file.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the below parameters in KUBELET_CERTIFICATE_ARGS variable.
-          --tls-cert-file=
-          --tls-private-key-file=
-          Based on your system, restart the kubelet service. For example,
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-          Permissive - When generating serving certificates, functionality could break in conjunction with hostname overrides which are required for certain cloud providers.
-        scored: false
+          By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+          They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
+          If for some reason you need to provide your own certificate and key, you can set
+          the below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+          kubelet-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true

      - id: 4.2.10
-        text: "Ensure that the --rotate-certificates argument is not set to false (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
@@ -380,20 +354,16 @@
              set: false
        bin_op: or
        remediation: |
-          If using a Kubelet config file, edit the file to add the line `rotateCertificates` to `true` or
-          remove it altogether to use the default value.
-          If using command line arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          remove --rotate-certificates=false argument from the KUBELET_CERTIFICATE_ARGS
-          variable.
-          Based on your system, restart the kubelet service. For example,
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-        scored: false
+          By default, K3s does not set the --rotate-certificates argument.
+          If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true

      - id: 4.2.11
-        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        audit_config: "/bin/cat $kubeletconf"
        tests:
          bin_op: or
@@ -407,18 +377,17 @@
              path: '{.featureGates.RotateKubeletServerCertificate}'
              set: false
        remediation: |
-          Edit the kubelet service file $kubeletsvc
-          on each worker node and set the below parameter in KUBELET_CERTIFICATE_ARGS variable.
-          --feature-gates=RotateKubeletServerCertificate=true
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
-          Not Applicable.
-        scored: false
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gates=RotateKubeletServerCertificate parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="feature-gates=RotateKubeletServerCertificate=true".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true

      - id: 4.2.12
        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
@@ -428,21 +397,18 @@
              op: valid_elements
              value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
        remediation: |
-          If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
-          TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+          If using the K3s config file /etc/rancher/k3s/config.yaml, edit the file to set the tls-cipher-suites parameter as below,
+          kubelet-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
          or to a subset of these values.
-          If using executable arguments, edit the kubelet service file
-          $kubeletsvc on each worker node and
-          set the --tls-cipher-suites parameter as follows, or to a subset of these values.
-          --tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
-          Based on your system, restart the kubelet service. For example:
-          systemctl daemon-reload
-          systemctl restart kubelet.service
+          If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites="
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
        scored: false

      - id: 4.2.13
        text: "Ensure that a limit is set on pod PIDs (Manual)"
-        audit: "/bin/ps -fC $kubeletbin"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
        tests:
          test_items:
@@ -450,5 +416,7 @@
              path: '{.podPidsLimit}'
        remediation: |
          Decide on an appropriate level for this parameter and set it,
-          either via the --pod-max-pids command line parameter or the PodPidsLimit configuration file setting.
+          if using the K3s config file /etc/rancher/k3s/config.yaml, by editing the file to set the pod-max-pids parameter:
+          kubelet-arg:
+            - "pod-max-pids="
        scored: false
diff --git a/cfg/k3s-cis-1.7/policies.yaml b/cfg/k3s-cis-1.7/policies.yaml
index 3b1d1ef17..f4d4a24e8 100644
--- a/cfg/k3s-cis-1.7/policies.yaml
+++ b/cfg/k3s-cis-1.7/policies.yaml
@@ -43,23 +43,15 @@ groups:
      - id: 5.1.5
        text: "Ensure that default service accounts are not actively used. (Manual)"
-        type: "skip"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
+        type: "manual"
        remediation: |
          Create explicit service accounts wherever a Kubernetes workload requires specific access
          to the Kubernetes API server.
          Modify the configuration of each default service account to include this value
          automountServiceAccountToken: false
-          Permissive - Kubernetes provides default service accounts to be used.
        scored: false
+
      - id: 5.1.6
        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
        type: "manual"
@@ -138,29 +130,23 @@
      - id: 5.2.3
        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
-        type: "skip"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostPID` containers.
-          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
        scored: false

      - id: 5.2.4
        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
-        type: "skip"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostIPC` containers.
-          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
        scored: false

      - id: 5.2.5
        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
-        type: "skip"
        remediation: |
          Add policies to each namespace in the cluster which has user workloads to restrict the
          admission of `hostNetwork` containers.
-          Permissive - Enabling Pod Security Policy can cause applications to unexpectedly fail.
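+          As one illustrative option (assuming the built-in Pod Security Admission controller is in use; this is not the only mechanism), a namespace can be labelled to enforce the baseline profile, which rejects hostNetwork pods:
+          kubectl label namespace <namespace> pod-security.kubernetes.io/enforce=baseline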
        scored: false

      - id: 5.2.6
@@ -199,8 +185,8 @@
        text: "Minimize the admission of containers with capabilities assigned (Manual)"
        type: "manual"
        remediation: |
-          Review the use of capabilites in applications running on your cluster. Where a namespace
-          contains applicaions which do not require any Linux capabities to operate consider adding
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
          a PSP which forbids the admission of containers which do not drop all capabilities.
        scored: false
@@ -242,10 +228,8 @@
      - id: 5.3.2
        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
-        type: "skip"
        remediation: |
          Follow the documentation and create NetworkPolicy objects as you need them.
-          Permissive - Enabling Network Policies can prevent certain applications from communicating with each other.
        scored: false

  - id: 5.4
@@ -310,9 +294,7 @@
      - id: 5.7.4
        text: "The default namespace should not be used (Manual)"
-        type: "skip"
        remediation: |
          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
          resources and that all new resources are created in a specific namespace.
-          Permissive - Kubernetes provides a default namespace.
        scored: false
diff --git a/cfg/k3s-cis-1.8/config.yaml b/cfg/k3s-cis-1.8/config.yaml
new file mode 100644
index 000000000..fff6edb7d
--- /dev/null
+++ b/cfg/k3s-cis-1.8/config.yaml
@@ -0,0 +1,54 @@
+---
+## Version-specific settings that override the values in cfg/config.yaml
+
+master:
+  components:
+    - apiserver
+    - kubelet
+    - scheduler
+    - controllermanager
+    - etcd
+    - policies
+  apiserver:
+    bins:
+      - containerd
+  kubelet:
+    bins:
+      - containerd
+    defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+    defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt
+  scheduler:
+    bins:
+      - containerd
+    kubeconfig:
+      - /var/lib/rancher/k3s/server/cred/scheduler.kubeconfig
+  controllermanager:
+    bins:
+      - containerd
+    kubeconfig:
+      - /var/lib/rancher/k3s/server/cred/controller.kubeconfig
+  etcd:
+    bins:
+      - containerd
+
+etcd:
+  confs: /var/lib/rancher/k3s/server/db/etcd/config
+
+node:
+  components:
+    - kubelet
+    - proxy
+  kubelet:
+    bins:
+      - containerd
+    confs:
+      - /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+    defaultkubeconfig: /var/lib/rancher/k3s/agent/kubelet.kubeconfig
+    defaultcafile: /var/lib/rancher/k3s/agent/client-ca.crt
+  proxy:
+    bins:
+      - containerd
+    defaultkubeconfig: /var/lib/rancher/k3s/agent/kubeproxy.kubeconfig
+policies:
+  components:
+    - policies
diff --git a/cfg/k3s-cis-1.8/controlplane.yaml b/cfg/k3s-cis-1.8/controlplane.yaml
new file mode 100644
index 000000000..d14070656
--- /dev/null
+++ b/cfg/k3s-cis-1.8/controlplane.yaml
@@ -0,0 +1,62 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 3
+text: "Control Plane Configuration"
+type: "controlplane"
+groups:
+  - id: 3.1
+    text: "Authentication and Authorization"
+    checks:
+      - id: 3.1.1
+        text: "Client certificate authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be
+          implemented in place of client certificates.
+        scored: false
+
+      - id: 3.1.2
+        text: "Service account token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of service account tokens.
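+          For example (an illustrative sketch with placeholder values, not part of the original guidance), OIDC authentication can be configured through the K3s config file /etc/rancher/k3s/config.yaml:
+          kube-apiserver-arg:
+            - "oidc-issuer-url=https://oidc.example.com"
+            - "oidc-client-id=kubernetes"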
+        scored: false
+
+      - id: 3.1.3
+        text: "Bootstrap token authentication should not be used for users (Manual)"
+        type: "manual"
+        remediation: |
+          Alternative mechanisms provided by Kubernetes such as the use of OIDC should be implemented
+          in place of bootstrap tokens.
+        scored: false
+
+  - id: 3.2
+    text: "Logging"
+    checks:
+      - id: 3.2.1
+        text: "Ensure that a minimal audit policy is created (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'audit-policy-file'"
+        tests:
+          test_items:
+            - flag: "--audit-policy-file"
+              set: true
+        remediation: |
+          Create an audit policy file for your cluster.
+        scored: false
+
+      - id: 3.2.2
+        text: "Ensure that the audit policy covers key security concerns (Manual)"
+        type: "manual"
+        remediation: |
+          Review the audit policy provided for the cluster and ensure that it covers
+          at least the following areas,
+          - Access to Secrets managed by the cluster. Care should be taken to only
+            log Metadata for requests to Secrets, ConfigMaps, and TokenReviews, in
+            order to avoid risk of logging sensitive data.
+          - Modification of Pod and Deployment objects.
+          - Use of `pods/exec`, `pods/portforward`, `pods/proxy` and `services/proxy`.
+          For most requests, minimally logging at the Metadata level is recommended
+          (the most basic level of logging).
+        scored: false
diff --git a/cfg/k3s-cis-1.8/etcd.yaml b/cfg/k3s-cis-1.8/etcd.yaml
new file mode 100644
index 000000000..4a39779cb
--- /dev/null
+++ b/cfg/k3s-cis-1.8/etcd.yaml
@@ -0,0 +1,144 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 2
+text: "Etcd Node Configuration"
+type: "etcd"
+groups:
+  - id: 2
+    text: "Etcd Node Configuration"
+    checks:
+      - id: 2.1
+        text: "Ensure that the --cert-file and --key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.client-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.crt"
+            - path: "{.client-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/server-client.key"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates cert and key files for etcd.
+          These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom cert and key files.
+        scored: false
+
+      - id: 2.2
+        text: "Ensure that the --client-cert-auth argument is set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.client-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s sets the --client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable client certificate authentication.
+        scored: false
+
+      - id: 2.3
+        text: "Ensure that the --auto-tls argument is not set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.client-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+            - path: "{.client-transport-security.auto-tls}"
+              set: false
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
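+          (One illustrative way to confirm that embedded etcd is in use, mirroring the audit used elsewhere in this benchmark: journalctl -m -u k3s | grep -m1 'Managed etcd cluster'.)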
+          When running with embedded-etcd, K3s does not set the --auto-tls parameter.
+          If this check fails, edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --auto-tls parameter or set it to false.
+            client-transport-security:
+              auto-tls: false
+        scored: false
+
+      - id: 2.4
+        text: "Ensure that the --peer-cert-file and --peer-key-file arguments are set as appropriate (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: and
+          test_items:
+            - path: "{.peer-transport-security.cert-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.crt"
+            - path: "{.peer-transport-security.key-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-server-client.key"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates peer cert and key files for etcd.
+          These are located in /var/lib/rancher/k3s/server/tls/etcd/.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use custom peer cert and key files.
+        scored: false
+
+      - id: 2.5
+        text: "Ensure that the --peer-client-cert-auth argument is set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.peer-transport-security.client-cert-auth}"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s sets the --peer-client-cert-auth parameter to true.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to disable peer client certificate authentication.
+        scored: false
+
+      - id: 2.6
+        text: "Ensure that the --peer-auto-tls argument is not set to true (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          bin_op: or
+          test_items:
+            - path: "{.peer-transport-security.auto-tls}"
+              compare:
+                op: eq
+                value: false
+            - path: "{.peer-transport-security.auto-tls}"
+              set: false
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s does not set the --peer-auto-tls parameter.
+          If this check fails, edit the etcd pod specification file $etcdconf on the master
+          node and either remove the --peer-auto-tls parameter or set it to false.
+            peer-transport-security:
+              auto-tls: false
+        scored: false
+
+      - id: 2.7
+        text: "Ensure that a unique Certificate Authority is used for etcd (Automated)"
+        audit_config: "cat $etcdconf"
+        tests:
+          test_items:
+            - path: "{.peer-transport-security.trusted-ca-file}"
+              compare:
+                op: eq
+                value: "/var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt"
+        remediation: |
+          If running with sqlite or an external DB, etcd checks are Not Applicable.
+          When running with embedded-etcd, K3s generates a unique certificate authority for etcd.
+          This is located at /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt.
+          If this check fails, ensure that the configuration file $etcdconf
+          has not been modified to use a shared certificate authority.
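+          As an illustrative manual verification (an editorial addition, not part of the original check), compare the etcd peer CA with the cluster CA and confirm that the two files differ:
+          sha256sum /var/lib/rancher/k3s/server/tls/etcd/peer-ca.crt /var/lib/rancher/k3s/server/tls/server-ca.crt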
+ scored: false diff --git a/cfg/k3s-cis-1.8/master.yaml b/cfg/k3s-cis-1.8/master.yaml new file mode 100644 index 000000000..eb4799a17 --- /dev/null +++ b/cfg/k3s-cis-1.8/master.yaml @@ -0,0 +1,985 @@ +--- +controls: +version: "k3s-cis-1.8" +id: 1 +text: "Control Plane Security Configuration" +type: "master" +groups: + - id: 1.1 + text: "Control Plane Node Configuration Files" + checks: + - id: 1.1.1 + text: "Ensure that the API server pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c permissions=%a $apiserverconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + scored: true + + - id: 1.1.2 + text: "Ensure that the API server pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $apiserverconf; then stat -c %U:%G $apiserverconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the api server within the k3s process. There is no API server pod specification file. + scored: true + + - id: 1.1.3 + text: "Ensure that the controller manager pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c permissions=%a $controllermanagerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.4 + text: "Ensure that the controller manager pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $controllermanagerconf; then stat -c %U:%G $controllermanagerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the controller manager within the k3s process. There is no controller manager pod specification file. + scored: true + + - id: 1.1.5 + text: "Ensure that the scheduler pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c permissions=%a $schedulerconf; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. + scored: true + + - id: 1.1.6 + text: "Ensure that the scheduler pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $schedulerconf; then stat -c %U:%G $schedulerconf; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds the scheduler within the k3s process. There is no scheduler pod specification file. 
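+          If desired, the flags passed to the embedded scheduler can instead be reviewed with (illustrative, mirroring the scheduler audits used elsewhere in this benchmark):
+          journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1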
+ scored: true + + - id: 1.1.7 + text: "Ensure that the etcd pod specification file permissions are set to 600 or more restrictive (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c permissions=%a; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.8 + text: "Ensure that the etcd pod specification file ownership is set to root:root (Automated)" + type: "skip" + audit: "/bin/sh -c 'if test -e $etcdconf; then find $etcdconf -name '*etcd*' | xargs stat -c %U:%G; fi'" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Not Applicable. + By default, K3s embeds etcd within the k3s process. There is no etcd pod specification file. + scored: true + + - id: 1.1.9 + text: "Ensure that the Container Network Interface file permissions are set to 600 or more restrictive (Automated)" + audit: find /var/lib/cni/networks -type f ! -name lock 2> /dev/null | xargs --no-run-if-empty stat -c permissions=%a + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + By default, K3s sets the CNI file permissions to 600. + Note that for many CNIs, a lock file is created with permissions 750. This is expected and can be ignored. + If you modify your CNI configuration, ensure that the permissions are set to 600. + For example, chmod 600 /var/lib/cni/networks/ + scored: true + + - id: 1.1.10 + text: "Ensure that the Container Network Interface file ownership is set to root:root (Automated)" + audit: find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root /var/lib/cni/networks/ + scored: true + + - id: 1.1.11 + text: "Ensure that the etcd data directory permissions are set to 700 or more restrictive (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + stat -c permissions=%a /var/lib/rancher/k3s/server/db/etcd + else + echo "permissions=700" + fi + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "700" + remediation: | + On the etcd server node, get the etcd data directory, passed as an argument --data-dir, + from the command 'ps -ef | grep etcd'. + Run the below command (based on the etcd data directory found above). For example, + chmod 700 /var/lib/etcd + scored: true + + - id: 1.1.12 + text: "Ensure that the etcd data directory ownership is set to etcd:etcd (Automated)" + audit: ps -ef | grep $etcdbin | grep -- --data-dir | sed 's%.*data-dir[= ]\([^ ]*\).*%\1%' | xargs stat -c %U:%G + type: "skip" + tests: + test_items: + - flag: "etcd:etcd" + remediation: | + Not Applicable. + For K3s, etcd is embedded within the k3s process. There is no separate etcd process. + Therefore the etcd data directory ownership is managed by the k3s process and should be root:root. 
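+          If desired, ownership can still be confirmed manually (illustrative, using the data directory referenced in check 1.1.11):
+          stat -c %U:%G /var/lib/rancher/k3s/server/db/etcd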
+ scored: true + + - id: 1.1.13 + text: "Ensure that the admin.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c permissions=%a /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chmod 600 /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.14 + text: "Ensure that the admin.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e /var/lib/rancher/k3s/server/cred/admin.kubeconfig; then stat -c %U:%G /var/lib/rancher/k3s/server/cred/admin.kubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, chown root:root /var/lib/rancher/k3s/server/cred/admin.kubeconfig + scored: true + + - id: 1.1.15 + text: "Ensure that the scheduler.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c permissions=%a $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $schedulerkubeconfig + scored: true + + - id: 1.1.16 + text: "Ensure that the scheduler.conf file ownership is set to root:root (Automated)" + audit: "/bin/sh -c 'if test -e $schedulerkubeconfig; then stat -c %U:%G $schedulerkubeconfig; fi'" + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $schedulerkubeconfig + scored: true + + - id: 1.1.17 + text: "Ensure that the controller-manager.conf file permissions are set to 600 or more restrictive (Automated)" + audit: "/bin/sh -c 'if test -e $controllermanagerkubeconfig; then stat -c permissions=%a $controllermanagerkubeconfig; fi'" + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chmod 600 $controllermanagerkubeconfig + scored: true + + - id: 1.1.18 + text: "Ensure that the controller-manager.conf file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G $controllermanagerkubeconfig" + tests: + test_items: + - flag: "root:root" + compare: + op: eq + value: "root:root" + set: true + remediation: | + Run the below command (based on the file location on your system) on the control plane node. + For example, + chown root:root $controllermanagerkubeconfig + scored: true + + - id: 1.1.19 + text: "Ensure that the Kubernetes PKI directory and file ownership is set to root:root (Automated)" + audit: "stat -c %U:%G /var/lib/rancher/k3s/server/tls" + use_multiple_values: true + tests: + test_items: + - flag: "root:root" + remediation: | + Run the below command (based on the file location on your system) on the control plane node. 
+ For example, + chown -R root:root /var/lib/rancher/k3s/server/tls + scored: true + + - id: 1.1.20 + text: "Ensure that the Kubernetes PKI certificate file permissions are set to 600 or more restrictive (Manual)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.crt'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.crt + scored: false + + - id: 1.1.21 + text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Automated)" + audit: "/bin/sh -c 'stat -c permissions=%a /var/lib/rancher/k3s/server/tls/*.key'" + use_multiple_values: true + tests: + test_items: + - flag: "permissions" + compare: + op: bitmask + value: "600" + remediation: | + Run the below command (based on the file location on your system) on the master node. + For example, + chmod -R 600 /var/lib/rancher/k3s/server/tls/*.key + scored: true + + - id: 1.2 + text: "API Server" + checks: + - id: 1.2.1 + text: "Ensure that the --anonymous-auth argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'anonymous-auth'" + tests: + test_items: + - flag: "--anonymous-auth" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --anonymous-auth argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "anonymous-auth=true" + scored: true + + - id: 1.2.2 + text: "Ensure that the --token-auth-file parameter is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--token-auth-file" + set: false + remediation: | + Follow the documentation and configure alternate mechanisms for authentication. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove anything similar to below. + kube-apiserver-arg: + - "token-auth-file=" + scored: true + + - id: 1.2.3 + text: "Ensure that the --DenyServiceExternalIPs is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: "DenyServiceExternalIPs" + set: true + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set DenyServiceExternalIPs. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=DenyServiceExternalIPs" + scored: true + + - id: 1.2.4 + text: "Ensure that the --kubelet-client-certificate and --kubelet-client-key arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: and + test_items: + - flag: "--kubelet-client-certificate" + - flag: "--kubelet-client-key" + remediation: | + By default, K3s automatically provides the kubelet client certificate and key. 
+ They are generated and located at /var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/client-kube-apiserver.key + If for some reason you need to provide your own certificate and key, you can set the + below parameters in the K3s config file /etc/rancher/k3s/config.yaml. + kube-apiserver-arg: + - "kubelet-client-certificate=" + - "kubelet-client-key=" + scored: true + + - id: 1.2.5 + text: "Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'kubelet-certificate-authority'" + tests: + test_items: + - flag: "--kubelet-certificate-authority" + remediation: | + By default, K3s automatically provides the kubelet CA cert file, at /var/lib/rancher/k3s/server/tls/server-ca.crt. + If for some reason you need to provide your own ca certificate, look at using the k3s certificate command line tool. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "kubelet-certificate-authority=" + scored: true + + - id: 1.2.6 + text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: nothave + value: "AlwaysAllow" + remediation: | + By default, K3s does not set the --authorization-mode to AlwaysAllow. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "authorization-mode=AlwaysAllow" + scored: true + + - id: 1.2.7 + text: "Ensure that the --authorization-mode argument includes Node (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "Node" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.8 + text: "Ensure that the --authorization-mode argument includes RBAC (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'authorization-mode'" + tests: + test_items: + - flag: "--authorization-mode" + compare: + op: has + value: "RBAC" + remediation: | + By default, K3s sets the --authorization-mode to Node and RBAC. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml, + ensure that you are not overriding authorization-mode. + scored: true + + - id: 1.2.9 + text: "Ensure that the admission control plugin EventRateLimit is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "EventRateLimit" + remediation: | + Follow the Kubernetes documentation and set the desired limits in a configuration file. + Then, edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameters. + kube-apiserver-arg: + - "enable-admission-plugins=...,EventRateLimit,..." 
+ - "admission-control-config-file=" + scored: false + + - id: 1.2.10 + text: "Ensure that the admission control plugin AlwaysAdmit is not set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: nothave + value: AlwaysAdmit + - flag: "--enable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --enable-admission-plugins to AlwaysAdmit. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml, remove any lines like below. + kube-apiserver-arg: + - "enable-admission-plugins=AlwaysAdmit" + scored: true + + - id: 1.2.11 + text: "Ensure that the admission control plugin AlwaysPullImages is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "AlwaysPullImages" + remediation: | + Permissive, per CIS guidelines, + "This setting could impact offline or isolated clusters, which have images pre-loaded and + do not have access to a registry to pull in-use images. This setting is not appropriate for + clusters which use this configuration." + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the below parameter. + kube-apiserver-arg: + - "enable-admission-plugins=...,AlwaysPullImages,..." + scored: false + + - id: 1.2.12 + text: "Ensure that the admission control plugin SecurityContextDeny is set if PodSecurityPolicy is not used (Manual)" + type: "skip" + audit: "/bin/ps -ef | grep $apiserverbin | grep -v grep" + tests: + bin_op: or + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "SecurityContextDeny" + - flag: "--enable-admission-plugins" + compare: + op: has + value: "PodSecurityPolicy" + remediation: | + Not Applicable. + Enabling Pod Security Policy is no longer supported on K3s v1.25+ and will cause applications to unexpectedly fail. + scored: false + + - id: 1.2.13 + text: "Ensure that the admission control plugin ServiceAccount is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "ServiceAccount" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + Follow the documentation and create ServiceAccount objects as per your environment. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=ServiceAccount" + scored: true + + - id: 1.2.14 + text: "Ensure that the admission control plugin NamespaceLifecycle is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--disable-admission-plugins" + compare: + op: nothave + value: "NamespaceLifecycle" + - flag: "--disable-admission-plugins" + set: false + remediation: | + By default, K3s does not set the --disable-admission-plugins to anything. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "disable-admission-plugins=...,NamespaceLifecycle,..." 
+ scored: true + + - id: 1.2.15 + text: "Ensure that the admission control plugin NodeRestriction is set (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'enable-admission-plugins'" + tests: + test_items: + - flag: "--enable-admission-plugins" + compare: + op: has + value: "NodeRestriction" + remediation: | + By default, K3s sets the --enable-admission-plugins to NodeRestriction. + If using the K3s config file /etc/rancher/k3s/config.yaml, check that you are not overriding the admission plugins. + If you are, include NodeRestriction in the list. + kube-apiserver-arg: + - "enable-admission-plugins=...,NodeRestriction,..." + scored: true + + - id: 1.2.16 + text: "Ensure that the --profiling argument is set to false (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'profiling'" + tests: + test_items: + - flag: "--profiling" + compare: + op: eq + value: false + remediation: | + By default, K3s sets the --profiling argument to false. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "profiling=true" + scored: true + + - id: 1.2.17 + text: "Ensure that the --audit-log-path argument is set (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-path" + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the audit-log-path parameter to a suitable path and + file where you would like audit logs to be written, for example, + kube-apiserver-arg: + - "audit-log-path=/var/lib/rancher/k3s/server/logs/audit.log" + scored: false + + - id: 1.2.18 + text: "Ensure that the --audit-log-maxage argument is set to 30 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxage" + compare: + op: gte + value: 30 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxage parameter to 30 or as an appropriate number of days, for example, + kube-apiserver-arg: + - "audit-log-maxage=30" + scored: false + + - id: 1.2.19 + text: "Ensure that the --audit-log-maxbackup argument is set to 10 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxbackup" + compare: + op: gte + value: 10 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxbackup parameter to 10 or to an appropriate value. For example, + kube-apiserver-arg: + - "audit-log-maxbackup=10" + scored: false + + - id: 1.2.20 + text: "Ensure that the --audit-log-maxsize argument is set to 100 or as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--audit-log-maxsize" + compare: + op: gte + value: 100 + remediation: | + Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and + set the audit-log-maxsize parameter to an appropriate size in MB. 
For example, + kube-apiserver-arg: + - "audit-log-maxsize=100" + scored: false + + - id: 1.2.21 + text: "Ensure that the --request-timeout argument is set as appropriate (Manual)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--request-timeout" + remediation: | + Permissive, per CIS guidelines, + "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed". + Edit the K3s config file /etc/rancher/k3s/config.yaml + and set the below parameter if needed. For example, + kube-apiserver-arg: + - "request-timeout=300s" + scored: false + + - id: 1.2.22 + text: "Ensure that the --service-account-lookup argument is set to true (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + bin_op: or + test_items: + - flag: "--service-account-lookup" + set: false + - flag: "--service-account-lookup" + compare: + op: eq + value: true + remediation: | + By default, K3s does not set the --service-account-lookup argument. + Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup. For example, + kube-apiserver-arg: + - "service-account-lookup=true" + Alternatively, you can delete the service-account-lookup parameter from this file so + that the default takes effect. + scored: true + + - id: 1.2.23 + text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1" + tests: + test_items: + - flag: "--service-account-key-file" + remediation: | + K3s automatically generates and sets the service account key file. + It is located at /var/lib/rancher/k3s/server/tls/service.key. + If this check fails, edit K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "service-account-key-file=" + scored: true + + - id: 1.2.24 + text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)" + audit: | + if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then + journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1 + else + echo "--etcd-certfile AND --etcd-keyfile" + fi + tests: + bin_op: and + test_items: + - flag: "--etcd-certfile" + set: true + - flag: "--etcd-keyfile" + set: true + remediation: | + K3s automatically generates and sets the etcd certificate and key files. + They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key. + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. + kube-apiserver-arg: + - "etcd-certfile=" + - "etcd-keyfile=" + scored: true + + - id: 1.2.25 + text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)" + audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2" + tests: + bin_op: and + test_items: + - flag: "--tls-cert-file" + set: true + - flag: "--tls-private-key-file" + set: true + remediation: | + By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver. + They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key + If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below. 
+      - id: 1.2.21
+        text: "Ensure that the --request-timeout argument is set as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          test_items:
+            - flag: "--request-timeout"
+        remediation: |
+          Permissive, per CIS guidelines,
+          "it is recommended to set this limit as appropriate and change the default limit of 60 seconds only if needed".
+          Edit the K3s config file /etc/rancher/k3s/config.yaml
+          and set the below parameter if needed. For example,
+          kube-apiserver-arg:
+            - "request-timeout=300s"
+        scored: false
+
+      - id: 1.2.22
+        text: "Ensure that the --service-account-lookup argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--service-account-lookup"
+              set: false
+            - flag: "--service-account-lookup"
+              compare:
+                op: eq
+                value: true
+        remediation: |
+          By default, K3s does not set the --service-account-lookup argument.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml and set the service-account-lookup parameter. For example,
+          kube-apiserver-arg:
+            - "service-account-lookup=true"
+          Alternatively, you can delete the service-account-lookup parameter from this file so
+          that the default takes effect.
+        scored: true
+
+      - id: 1.2.23
+        text: "Ensure that the --service-account-key-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1"
+        tests:
+          test_items:
+            - flag: "--service-account-key-file"
+        remediation: |
+          K3s automatically generates and sets the service account key file.
+          It is located at /var/lib/rancher/k3s/server/tls/service.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "service-account-key-file="
+        scored: true
+
+      - id: 1.2.24
+        text: "Ensure that the --etcd-certfile and --etcd-keyfile arguments are set as appropriate (Automated)"
+        audit: |
+          if [ "$(journalctl -m -u k3s | grep -m1 'Managed etcd cluster' | wc -l)" -gt 0 ]; then
+            journalctl -m -u k3s | grep -m1 'Running kube-apiserver' | tail -n1
+          else
+            echo "--etcd-certfile AND --etcd-keyfile"
+          fi
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--etcd-certfile"
+              set: true
+            - flag: "--etcd-keyfile"
+              set: true
+        remediation: |
+          K3s automatically generates and sets the etcd certificate and key files.
+          They are located at /var/lib/rancher/k3s/server/tls/etcd/client.crt and /var/lib/rancher/k3s/server/tls/etcd/client.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-certfile="
+            - "etcd-keyfile="
+        scored: true
+
+      - id: 1.2.25
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep -A1 'Running kube-apiserver' | tail -n2"
+        tests:
+          bin_op: and
+          test_items:
+            - flag: "--tls-cert-file"
+              set: true
+            - flag: "--tls-private-key-file"
+              set: true
+        remediation: |
+          By default, K3s automatically generates and provides the TLS certificate and private key for the apiserver.
+          They are generated and located at /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.crt and /var/lib/rancher/k3s/server/tls/serving-kube-apiserver.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true
+
+      - id: 1.2.26
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'client-ca-file'"
+        tests:
+          test_items:
+            - flag: "--client-ca-file"
+        remediation: |
+          By default, K3s automatically provides the client certificate authority file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/client-ca.crt.
+          If for some reason you need to provide your own CA certificate, look at using the k3s certificate command line tool.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "client-ca-file="
+        scored: true
+
+      - id: 1.2.27
+        text: "Ensure that the --etcd-cafile argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'etcd-cafile'"
+        tests:
+          test_items:
+            - flag: "--etcd-cafile"
+        remediation: |
+          By default, K3s automatically provides the etcd certificate authority file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/etcd/server-ca.crt.
+          If for some reason you need to provide your own CA certificate, look at using the k3s certificate command line tool.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-apiserver-arg:
+            - "etcd-cafile="
+        scored: true
+
+      - id: 1.2.28
+        text: "Ensure that the --encryption-provider-config argument is set as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'encryption-provider-config'"
+        tests:
+          test_items:
+            - flag: "--encryption-provider-config"
+        remediation: |
+          K3s can be configured to use encryption providers to encrypt secrets at rest.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+        scored: false
+
+      - id: 1.2.29
+        text: "Ensure that encryption providers are appropriately configured (Manual)"
+        audit: |
+          ENCRYPTION_PROVIDER_CONFIG=$(journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep -- --encryption-provider-config | sed 's%.*encryption-provider-config[= ]\([^ ]*\).*%\1%')
+          if test -e $ENCRYPTION_PROVIDER_CONFIG; then grep -o 'providers\"\:\[.*\]' $ENCRYPTION_PROVIDER_CONFIG | grep -o "[A-Za-z]*" | head -2 | tail -1 | sed 's/^/provider=/'; fi
+        tests:
+          test_items:
+            - flag: "provider"
+              compare:
+                op: valid_elements
+                value: "aescbc,kms,secretbox"
+        remediation: |
+          K3s can be configured to use encryption providers to encrypt secrets at rest. K3s will utilize the aescbc provider.
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node and set the below parameter.
+          secrets-encryption: true
+          Secrets encryption can then be managed with the k3s secrets-encrypt command line tool.
+          If needed, you can find the generated encryption config at /var/lib/rancher/k3s/server/cred/encryption-config.json.
+        scored: false
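Check 1.2.29 above greps a provider name out of the generated encryption config, so the shape of that file matters. The sketch below is an illustrative Kubernetes EncryptionConfiguration (the upstream format the apiserver consumes), not the exact file K3s writes; the key material is a placeholder, and K3s generates its own file at /var/lib/rancher/k3s/server/cred/encryption-config.json when secrets-encryption is enabled.

```yaml
# Illustrative EncryptionConfiguration; the first provider listed is used for writes.
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: aescbckey               # placeholder key name
              secret: <base64 32-byte key>  # placeholder, never commit real keys
      - identity: {}                        # fallback for reading unencrypted data
```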
+      - id: 1.2.30
+        text: "Ensure that the API Server only makes use of Strong Cryptographic Ciphers (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-apiserver' | tail -n1 | grep 'tls-cipher-suites'"
+        tests:
+          test_items:
+            - flag: "--tls-cipher-suites"
+              compare:
+                op: valid_elements
+                value: "TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_RSA_WITH_3DES_EDE_CBC_SHA,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_256_GCM_SHA384"
+        remediation: |
+          By default, the K3s kube-apiserver complies with this test. Changes to these values may cause regression;
+          therefore, ensure that all apiserver clients support the new TLS configuration before applying it in production deployments.
+          If a custom TLS configuration is required, consider also creating a custom version of this rule that aligns with your requirements.
+          If this check fails, remove any custom configuration around `tls-cipher-suites` or update the /etc/rancher/k3s/config.yaml file to match the default by adding the following:
+          kube-apiserver-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+        scored: true
+
+  - id: 1.3
+    text: "Controller Manager"
+    checks:
+      - id: 1.3.1
+        text: "Ensure that the --terminated-pod-gc-threshold argument is set as appropriate (Manual)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'terminated-pod-gc-threshold'"
+        tests:
+          test_items:
+            - flag: "--terminated-pod-gc-threshold"
+        remediation: |
+          Edit the K3s config file /etc/rancher/k3s/config.yaml on the control plane node
+          and set the terminated-pod-gc-threshold parameter to an appropriate threshold. For example,
+          kube-controller-manager-arg:
+            - "terminated-pod-gc-threshold=10"
+        scored: false
+
+      - id: 1.3.2
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'profiling'"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.3.3
+        text: "Ensure that the --use-service-account-credentials argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'use-service-account-credentials'"
+        tests:
+          test_items:
+            - flag: "--use-service-account-credentials"
+              compare:
+                op: noteq
+                value: false
+        remediation: |
+          By default, K3s sets the --use-service-account-credentials argument to true.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "use-service-account-credentials=false"
+        scored: true
+
+      - id: 1.3.4
+        text: "Ensure that the --service-account-private-key-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'service-account-private-key-file'"
+        tests:
+          test_items:
+            - flag: "--service-account-private-key-file"
+        remediation: |
+          By default, K3s automatically provides the service account private key file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/service.current.key.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "service-account-private-key-file="
+        scored: true
+
+      - id: 1.3.5
+        text: "Ensure that the --root-ca-file argument is set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1 | grep 'root-ca-file'"
+        tests:
+          test_items:
+            - flag: "--root-ca-file"
+        remediation: |
+          By default, K3s automatically provides the root CA file.
+          It is generated and located at /var/lib/rancher/k3s/server/tls/server-ca.crt.
+          If for some reason you need to provide your own CA certificate, look at using the k3s certificate command line tool.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "root-ca-file="
+        scored: true
+
+      - id: 1.3.6
+        text: "Ensure that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--feature-gates"
+              compare:
+                op: nothave
+                value: "RotateKubeletServerCertificate=false"
+              set: true
+            - flag: "--feature-gates"
+              set: false
+        remediation: |
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "feature-gate=RotateKubeletServerCertificate"
+        scored: true
+
+      - id: 1.3.7
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-controller-manager' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-controller-manager-arg:
+            - "bind-address="
+        scored: true
+
+  - id: 1.4
+    text: "Scheduler"
+    checks:
+      - id: 1.4.1
+        text: "Ensure that the --profiling argument is set to false (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'profiling'"
+        tests:
+          test_items:
+            - flag: "--profiling"
+              compare:
+                op: eq
+                value: false
+              set: true
+        remediation: |
+          By default, K3s sets the --profiling argument to false.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "profiling=true"
+        scored: true
+
+      - id: 1.4.2
+        text: "Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)"
+        audit: "journalctl -m -u k3s | grep 'Running kube-scheduler' | tail -n1 | grep 'bind-address'"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--bind-address"
+              compare:
+                op: eq
+                value: "127.0.0.1"
+              set: true
+            - flag: "--bind-address"
+              set: false
+        remediation: |
+          By default, K3s sets the --bind-address argument to 127.0.0.1.
+          If this check fails, edit the K3s config file /etc/rancher/k3s/config.yaml and remove any lines like below.
+          kube-scheduler-arg:
+            - "bind-address="
+        scored: true
diff --git a/cfg/k3s-cis-1.8/node.yaml b/cfg/k3s-cis-1.8/node.yaml
new file mode 100644
index 000000000..7a238a1a8
--- /dev/null
+++ b/cfg/k3s-cis-1.8/node.yaml
@@ -0,0 +1,422 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 4
+text: "Worker Node Security Configuration"
+type: "node"
+groups:
+  - id: 4.1
+    text: "Worker Node Configuration Files"
+    checks:
+      - id: 4.1.1
+        text: "Ensure that the kubelet service file permissions are set to 600 or more restrictive (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c permissions=%a $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.2
+        text: "Ensure that the kubelet service file ownership is set to root:root (Automated)"
+        type: "skip"
+        audit: '/bin/sh -c ''if test -e $kubeletsvc; then stat -c %U:%G $kubeletsvc; fi'' '
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet service file; all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.3
+        text: "If proxy kubeconfig file exists ensure permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c permissions=%a $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $proxykubeconfig
+        scored: true
+
+      - id: 4.1.4
+        text: "If proxy kubeconfig file exists ensure ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $proxykubeconfig; then stat -c %U:%G $proxykubeconfig; fi'' '
+        tests:
+          bin_op: or
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example, chown root:root $proxykubeconfig
+        scored: true
+
+      - id: 4.1.5
+        text: "Ensure that the --kubeconfig kubelet.conf file permissions are set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletkubeconfig; then stat -c permissions=%a $kubeletkubeconfig; fi'' '
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chmod 600 $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.6
+        text: "Ensure that the --kubeconfig kubelet.conf file ownership is set to root:root (Automated)"
+        audit: 'stat -c %U:%G $kubeletkubeconfig'
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Run the below command (based on the file location on your system) on each worker node.
+          For example,
+          chown root:root $kubeletkubeconfig
+        scored: true
+
+      - id: 4.1.7
+        text: "Ensure that the certificate authorities file permissions are set to 600 or more restrictive (Automated)"
+        audit: "stat -c permissions=%a $kubeletcafile"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Run the following command to modify the file permissions of the --client-ca-file.
+          chmod 600 $kubeletcafile
+        scored: true
+
+      - id: 4.1.8
+        text: "Ensure that the client certificate authorities file ownership is set to root:root (Automated)"
+        audit: "stat -c %U:%G $kubeletcafile"
+        tests:
+          test_items:
+            - flag: root:root
+              compare:
+                op: eq
+                value: root:root
+        remediation: |
+          Run the following command to modify the ownership of the --client-ca-file.
+          chown root:root $kubeletcafile
+        scored: true
+
+      - id: 4.1.9
+        text: "Ensure that the kubelet --config configuration file has permissions set to 600 or more restrictive (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c permissions=%a $kubeletconf; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: "permissions"
+              compare:
+                op: bitmask
+                value: "600"
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+      - id: 4.1.10
+        text: "Ensure that the kubelet --config configuration file ownership is set to root:root (Automated)"
+        audit: '/bin/sh -c ''if test -e $kubeletconf; then stat -c %U:%G $kubeletconf; fi'' '
+        type: "skip"
+        tests:
+          test_items:
+            - flag: root:root
+        remediation: |
+          Not Applicable.
+          The kubelet is embedded in the k3s process. There is no kubelet config file, all configuration is passed in as arguments at runtime.
+        scored: true
+
+  - id: 4.2
+    text: "Kubelet"
+    checks:
+      - id: 4.2.1
+        text: "Ensure that the --anonymous-auth argument is set to false (Automated)"
+        audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "anonymous-auth" | grep -v grep; else echo "--anonymous-auth=false"; fi'' '
+        tests:
+          test_items:
+            - flag: "--anonymous-auth"
+              path: '{.authentication.anonymous.enabled}'
+              compare:
+                op: eq
+                value: false
+        remediation: |
+          By default, K3s sets the --anonymous-auth argument to false. If you have set this to a different value, you
+          should set it back to false. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "anonymous-auth=true"
+          If using the command line, edit the K3s service file and remove the below argument.
+          --kubelet-arg="anonymous-auth=true"
+          Based on your system, restart the k3s service. For example,
+          systemctl daemon-reload
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.2
+        text: "Ensure that the --authorization-mode argument is not set to AlwaysAllow (Automated)"
+        audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "authorization-mode"; else echo "--authorization-mode=Webhook"; fi'' '
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --authorization-mode
+              path: '{.authorization.mode}'
+              compare:
+                op: nothave
+                value: AlwaysAllow
+        remediation: |
+          By default, K3s does not set the --authorization-mode to AlwaysAllow.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "authorization-mode=AlwaysAllow"
+          If using the command line, edit the K3s service file and remove the below argument.
+          --kubelet-arg="authorization-mode=AlwaysAllow"
+          Based on your system, restart the k3s service. For example,
+          systemctl daemon-reload
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.3
+        text: "Ensure that the --client-ca-file argument is set as appropriate (Automated)"
+        audit: '/bin/sh -c ''if test $(journalctl -m -u k3s | grep "Running kube-apiserver" | wc -l) -gt 0; then journalctl -m -u k3s | grep "Running kube-apiserver" | tail -n1 | grep "client-ca-file"; else echo "--client-ca-file=/var/lib/rancher/k3s/server/tls/request-header-ca.crt"; fi'' '
+        tests:
+          test_items:
+            - flag: --client-ca-file
+              path: '{.authentication.x509.clientCAFile}'
+        remediation: |
+          By default, K3s automatically provides the client CA certificate for the Kubelet.
+          It is generated and located at /var/lib/rancher/k3s/agent/client-ca.crt.
+        scored: true
+
+      - id: 4.2.4
+        text: "Verify that the --read-only-port argument is set to 0 (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              compare:
+                op: eq
+                value: 0
+            - flag: "--read-only-port"
+              path: '{.readOnlyPort}'
+              set: false
+        remediation: |
+          By default, K3s sets the --read-only-port argument to 0. If you have set this to a different value, you
+          should set it back to 0. If using the K3s config file /etc/rancher/k3s/config.yaml, remove any lines similar to below.
+          kubelet-arg:
+            - "read-only-port=XXXX"
+          If using the command line, edit the K3s service file and remove the below argument.
+          --kubelet-arg="read-only-port=XXXX"
+          Based on your system, restart the k3s service. For example,
+          systemctl daemon-reload
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.5
+        text: "Ensure that the --streaming-connection-idle-timeout argument is not set to 0 (Manual)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              compare:
+                op: noteq
+                value: 0
+            - flag: --streaming-connection-idle-timeout
+              path: '{.streamingConnectionIdleTimeout}'
+              set: false
+        remediation: |
+          If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+          kubelet-arg:
+            - "streaming-connection-idle-timeout=5m"
+          If using the command line, run K3s with --kubelet-arg="streaming-connection-idle-timeout=5m".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: false
+
+      - id: 4.2.6
+        text: "Ensure that the --make-iptables-util-chains argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              compare:
+                op: eq
+                value: true
+            - flag: --make-iptables-util-chains
+              path: '{.makeIPTablesUtilChains}'
+              set: false
+        remediation: |
+          If using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter.
+          kubelet-arg:
+            - "make-iptables-util-chains=true"
+          If using the command line, run K3s with --kubelet-arg="make-iptables-util-chains=true".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.7
+        text: "Ensure that the --hostname-override argument is not set (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        type: "skip"
+        tests:
+          test_items:
+            - flag: --hostname-override
+              set: false
+        remediation: |
+          Not Applicable.
+          By default, K3s does set the --hostname-override argument. Per CIS guidelines, this is permitted to comply
+          with cloud providers that require this flag so that the hostname matches node names.
+        scored: true
+
+      - id: 4.2.8
+        text: "Ensure that the eventRecordQPS argument is set to a level which ensures appropriate event capture (Manual)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              compare:
+                op: gte
+                value: 0
+            - flag: --event-qps
+              path: '{.eventRecordQPS}'
+              set: false
+        remediation: |
+          By default, K3s sets the event-qps to 0. Should you wish to change this,
+          if using the K3s config file /etc/rancher/k3s/config.yaml, set the following parameter to an appropriate value.
+          kubelet-arg:
+            - "event-qps="
+          If using the command line, run K3s with --kubelet-arg="event-qps=".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: false
+
+      - id: 4.2.9
+        text: "Ensure that the --tls-cert-file and --tls-private-key-file arguments are set as appropriate (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        tests:
+          test_items:
+            - flag: --tls-cert-file
+              path: '/var/lib/rancher/k3s/agent/serving-kubelet.crt'
+            - flag: --tls-private-key-file
+              path: '/var/lib/rancher/k3s/agent/serving-kubelet.key'
+        remediation: |
+          By default, K3s automatically provides the TLS certificate and private key for the Kubelet.
+          They are generated and located at /var/lib/rancher/k3s/agent/serving-kubelet.crt and /var/lib/rancher/k3s/agent/serving-kubelet.key.
+          If for some reason you need to provide your own certificate and key, you can set the
+          below parameters in the K3s config file /etc/rancher/k3s/config.yaml.
+          kubelet-arg:
+            - "tls-cert-file="
+            - "tls-private-key-file="
+        scored: true
+
+      - id: 4.2.10
+        text: "Ensure that the --rotate-certificates argument is not set to false (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          bin_op: or
+          test_items:
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              compare:
+                op: eq
+                value: true
+            - flag: --rotate-certificates
+              path: '{.rotateCertificates}'
+              set: false
+        remediation: |
+          By default, K3s does not set the --rotate-certificates argument. If you have set this flag with a value of `false`, you should either set it to `true` or completely remove the flag.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any rotate-certificates parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="rotate-certificates".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.11
+        text: "Verify that the RotateKubeletServerCertificate argument is set to true (Automated)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/cat $kubeletconf"
+        tests:
+          bin_op: or
+          test_items:
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              compare:
+                op: nothave
+                value: false
+            - flag: RotateKubeletServerCertificate
+              path: '{.featureGates.RotateKubeletServerCertificate}'
+              set: false
+        remediation: |
+          By default, K3s does not set the RotateKubeletServerCertificate feature gate.
+          If you have enabled this feature gate, you should remove it.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, remove any feature-gate=RotateKubeletServerCertificate parameter.
+          If using the command line, remove the K3s flag --kubelet-arg="feature-gate=RotateKubeletServerCertificate".
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: true
+
+      - id: 4.2.12
+        text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --tls-cipher-suites
+              path: '{range .tlsCipherSuites[:]}{}{'',''}{end}'
+              compare:
+                op: valid_elements
+                value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
+        remediation: |
+          If using the K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `tls-cipher-suites` to
+          kubelet-arg:
+            - "tls-cipher-suites=TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
+          or to a subset of these values.
+          If using the command line, add the K3s flag --kubelet-arg="tls-cipher-suites="
+          Based on your system, restart the k3s service. For example,
+          systemctl restart k3s.service
+        scored: false
+
+      - id: 4.2.13
+        text: "Ensure that a limit is set on pod PIDs (Manual)"
+        audit: "journalctl -m -u k3s -u k3s-agent | grep 'Running kubelet' | tail -n1"
+        audit_config: "/bin/sh -c 'if test -e $kubeletconf; then /bin/cat $kubeletconf; fi' "
+        tests:
+          test_items:
+            - flag: --pod-max-pids
+              path: '{.podPidsLimit}'
+        remediation: |
+          Decide on an appropriate level for this parameter and set it.
+          If using the K3s config file /etc/rancher/k3s/config.yaml, edit the file to set `pod-max-pids` to
+          kubelet-arg:
+            - "pod-max-pids="
+        scored: false
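A note on mechanics for the 4.2 checks above: each test item carries both a command-line flag and a JSONPath `path`; when the flag is absent from the kubelet process line, kube-bench evaluates the path against the YAML returned by `audit_config`. Below is a hypothetical kubelet config file showing where those paths land; the field names are the upstream KubeletConfiguration fields, while the values are example choices.

```yaml
# Hypothetical kubelet config; comments map fields to the JSONPaths used above.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false                     # {.authentication.anonymous.enabled} (4.2.1)
authorization:
  mode: Webhook                        # {.authorization.mode} (4.2.2)
readOnlyPort: 0                        # {.readOnlyPort} (4.2.4)
streamingConnectionIdleTimeout: 5m     # {.streamingConnectionIdleTimeout} (4.2.5)
makeIPTablesUtilChains: true           # {.makeIPTablesUtilChains} (4.2.6)
rotateCertificates: true               # {.rotateCertificates} (4.2.10)
podPidsLimit: 4096                     # {.podPidsLimit} (4.2.13); example value
```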
diff --git a/cfg/k3s-cis-1.8/policies.yaml b/cfg/k3s-cis-1.8/policies.yaml
new file mode 100644
index 000000000..29c65ef42
--- /dev/null
+++ b/cfg/k3s-cis-1.8/policies.yaml
@@ -0,0 +1,300 @@
+---
+controls:
+version: "k3s-cis-1.8"
+id: 5
+text: "Kubernetes Policies"
+type: "policies"
+groups:
+  - id: 5.1
+    text: "RBAC and Service Accounts"
+    checks:
+      - id: 5.1.1
+        text: "Ensure that the cluster-admin role is only used where required (Manual)"
+        type: "manual"
+        remediation: |
+          Identify all clusterrolebindings to the cluster-admin role. Check if they are used and
+          if they need this role or if they could use a role with fewer privileges.
+          Where possible, first bind users to a lower privileged role and then remove the
+          clusterrolebinding to the cluster-admin role:
+          kubectl delete clusterrolebinding [name]
+        scored: false
+
+      - id: 5.1.2
+        text: "Minimize access to secrets (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove get, list and watch access to Secret objects in the cluster.
+        scored: false
+
+      - id: 5.1.3
+        text: "Minimize wildcard use in Roles and ClusterRoles (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible replace any use of wildcards in clusterroles and roles with specific
+          objects or actions.
+        scored: false
+
+      - id: 5.1.4
+        text: "Minimize access to create pods (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to pod objects in the cluster.
+        scored: false
+
+      - id: 5.1.5
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
+        remediation: |
+          Create explicit service accounts wherever a Kubernetes workload requires specific access
+          to the Kubernetes API server.
+          Modify the configuration of each default service account to include this value
+          automountServiceAccountToken: false
+        scored: false
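To make the 5.1.5 remediation concrete, one hedged sketch: patch the default service account in each namespace and give workloads that genuinely need API access their own account. The namespace and account names below are examples, not values from the benchmark.

```yaml
# Illustrative only: disable token automount on the default service account...
apiVersion: v1
kind: ServiceAccount
metadata:
  name: default
  namespace: my-namespace        # example namespace
automountServiceAccountToken: false
---
# ...and create an explicit account for workloads that need the API server.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: app-client               # hypothetical name
  namespace: my-namespace
```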
+      - id: 5.1.6
+        text: "Ensure that Service Account Tokens are only mounted where necessary (Manual)"
+        type: "manual"
+        remediation: |
+          Modify the definition of pods and service accounts which do not need to mount service
+          account tokens to disable it.
+        scored: false
+
+      - id: 5.1.7
+        text: "Avoid use of system:masters group (Manual)"
+        type: "manual"
+        remediation: |
+          Remove the system:masters group from all users in the cluster.
+        scored: false
+
+      - id: 5.1.8
+        text: "Limit use of the Bind, Impersonate and Escalate permissions in the Kubernetes cluster (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove the impersonate, bind and escalate rights from subjects.
+        scored: false
+
+      - id: 5.1.9
+        text: "Minimize access to create persistent volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove create access to PersistentVolume objects in the cluster.
+        scored: false
+
+      - id: 5.1.10
+        text: "Minimize access to the proxy sub-resource of nodes (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the proxy sub-resource of node objects.
+        scored: false
+
+      - id: 5.1.11
+        text: "Minimize access to the approval sub-resource of certificatesigningrequests objects (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the approval sub-resource of certificatesigningrequest objects.
+        scored: false
+
+      - id: 5.1.12
+        text: "Minimize access to webhook configuration objects (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the validatingwebhookconfigurations or mutatingwebhookconfigurations objects.
+        scored: false
+
+      - id: 5.1.13
+        text: "Minimize access to the service account token creation (Manual)"
+        type: "manual"
+        remediation: |
+          Where possible, remove access to the token sub-resource of serviceaccount objects.
+        scored: false
+
+  - id: 5.2
+    text: "Pod Security Standards"
+    checks:
+      - id: 5.2.1
+        text: "Ensure that the cluster has at least one active policy control mechanism in place (Manual)"
+        type: "manual"
+        remediation: |
+          Ensure that either Pod Security Admission or an external policy control system is in place
+          for every namespace which contains user workloads.
+        scored: false
+
+      - id: 5.2.2
+        text: "Minimize the admission of privileged containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of privileged containers.
+        scored: false
+
+      - id: 5.2.3
+        text: "Minimize the admission of containers wishing to share the host process ID namespace (Automated)"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostPID` containers.
+        scored: false
+
+      - id: 5.2.4
+        text: "Minimize the admission of containers wishing to share the host IPC namespace (Automated)"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostIPC` containers.
+        scored: false
+
+      - id: 5.2.5
+        text: "Minimize the admission of containers wishing to share the host network namespace (Automated)"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of `hostNetwork` containers.
+        scored: false
+
+      - id: 5.2.6
+        text: "Minimize the admission of containers with allowPrivilegeEscalation (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `.spec.allowPrivilegeEscalation` set to `true`.
+        scored: true
+
+      - id: 5.2.7
+        text: "Minimize the admission of root containers (Automated)"
+        type: "manual"
+        remediation: |
+          Create a policy for each namespace in the cluster, ensuring that either `MustRunAsNonRoot`
+          or `MustRunAs` with the range of UIDs not including 0 is set.
+        scored: false
+
+      - id: 5.2.8
+        text: "Minimize the admission of containers with the NET_RAW capability (Automated)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with the `NET_RAW` capability.
+        scored: false
+
+      - id: 5.2.9
+        text: "Minimize the admission of containers with added capabilities (Automated)"
+        type: "manual"
+        remediation: |
+          Ensure that `allowedCapabilities` is not present in policies for the cluster unless
+          it is set to an empty array.
+        scored: false
+
+      - id: 5.2.10
+        text: "Minimize the admission of containers with capabilities assigned (Manual)"
+        type: "manual"
+        remediation: |
+          Review the use of capabilities in applications running on your cluster. Where a namespace
+          contains applications which do not require any Linux capabilities to operate, consider adding
+          a PSP which forbids the admission of containers which do not drop all capabilities.
+        scored: false
+
+      - id: 5.2.11
+        text: "Minimize the admission of Windows HostProcess containers (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers that have `.securityContext.windowsOptions.hostProcess` set to `true`.
+        scored: false
+
+      - id: 5.2.12
+        text: "Minimize the admission of HostPath volumes (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers with `hostPath` volumes.
+        scored: false
+
+      - id: 5.2.13
+        text: "Minimize the admission of containers which use HostPorts (Manual)"
+        type: "manual"
+        remediation: |
+          Add policies to each namespace in the cluster which has user workloads to restrict the
+          admission of containers which use `hostPort` sections.
+        scored: false
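For 5.2.1 and the admission checks that follow it, Pod Security Admission is the built-in policy mechanism. One illustrative way to satisfy them is to label each user-workload namespace as sketched below; the namespace name and the choice of the `restricted` level are example decisions, not requirements of the benchmark.

```yaml
# Illustrative namespace enforcing the "restricted" Pod Security Standard.
apiVersion: v1
kind: Namespace
metadata:
  name: my-app                                          # example namespace
  labels:
    pod-security.kubernetes.io/enforce: restricted
    pod-security.kubernetes.io/enforce-version: latest
    pod-security.kubernetes.io/warn: restricted
```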
+  - id: 5.3
+    text: "Network Policies and CNI"
+    checks:
+      - id: 5.3.1
+        text: "Ensure that the CNI in use supports NetworkPolicies (Manual)"
+        type: "manual"
+        remediation: |
+          If the CNI plugin in use does not support network policies, consideration should be given to
+          making use of a different plugin, or finding an alternate mechanism for restricting traffic
+          in the Kubernetes cluster.
+        scored: false
+
+      - id: 5.3.2
+        text: "Ensure that all Namespaces have NetworkPolicies defined (Manual)"
+        remediation: |
+          Follow the documentation and create NetworkPolicy objects as you need them.
+        scored: false
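A common starting point for 5.3.2 is a per-namespace default-deny policy, which the CNI then enforces. A minimal illustrative example follows; the name and namespace are placeholders.

```yaml
# Illustrative default-deny policy; the empty podSelector matches every pod.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all         # example name
  namespace: my-namespace        # example namespace
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
```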
+  - id: 5.4
+    text: "Secrets Management"
+    checks:
+      - id: 5.4.1
+        text: "Prefer using Secrets as files over Secrets as environment variables (Manual)"
+        type: "manual"
+        remediation: |
+          If possible, rewrite application code to read Secrets from mounted secret files, rather than
+          from environment variables.
+        scored: false
+
+      - id: 5.4.2
+        text: "Consider external secret storage (Manual)"
+        type: "manual"
+        remediation: |
+          Refer to the Secrets management options offered by your cloud provider or a third-party
+          secrets management solution.
+        scored: false
+
+  - id: 5.5
+    text: "Extensible Admission Control"
+    checks:
+      - id: 5.5.1
+        text: "Configure Image Provenance using ImagePolicyWebhook admission controller (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and setup image provenance.
+        scored: false
+
+  - id: 5.7
+    text: "General Policies"
+    checks:
+      - id: 5.7.1
+        text: "Create administrative boundaries between resources using namespaces (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the documentation and create namespaces for objects in your deployment as you need
+          them.
+        scored: false
+
+      - id: 5.7.2
+        text: "Ensure that the seccomp profile is set to docker/default in your Pod definitions (Manual)"
+        type: "manual"
+        remediation: |
+          Use `securityContext` to enable the docker/default seccomp profile in your pod definitions.
+          An example is as below:
+          securityContext:
+            seccompProfile:
+              type: RuntimeDefault
+        scored: false
+
+      - id: 5.7.3
+        text: "Apply SecurityContext to your Pods and Containers (Manual)"
+        type: "manual"
+        remediation: |
+          Follow the Kubernetes documentation and apply SecurityContexts to your Pods. For a
+          suggested list of SecurityContexts, you may refer to the CIS Security Benchmark for Docker
+          Containers.
+        scored: false
+
+      - id: 5.7.4
+        text: "The default namespace should not be used (Manual)"
+        remediation: |
+          Ensure that namespaces are created to allow for appropriate segregation of Kubernetes
+          resources and that all new resources are created in a specific namespace.
+        scored: false
diff --git a/cfg/rh-1.0/node.yaml b/cfg/rh-1.0/node.yaml
index fb982d6f3..fb5a47324 100644
--- a/cfg/rh-1.0/node.yaml
+++ b/cfg/rh-1.0/node.yaml
@@ -48,7 +48,7 @@ groups:
             echo "No matching pods found on the current node."
           else
             # Execute the stat command
-            oc exec -n openshift-sdn "$POD_NAME" - stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
+            oc exec -n openshift-sdn "$POD_NAME" -- stat -Lc "$i %n permissions=%a" /config/kube-proxy-config.yaml 2>/dev/null
           fi
       tests:
         bin_op: or
diff --git a/cfg/rke-cis-1.23/node.yaml b/cfg/rke-cis-1.23/node.yaml
index ea13f945d..a509ed743 100644
--- a/cfg/rke-cis-1.23/node.yaml
+++ b/cfg/rke-cis-1.23/node.yaml
@@ -453,7 +453,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/rke-cis-1.23/policies.yaml b/cfg/rke-cis-1.23/policies.yaml
index 611d2c82c..5e4c6d640 100644
--- a/cfg/rke-cis-1.23/policies.yaml
+++ b/cfg/rke-cis-1.23/policies.yaml
@@ -42,16 +42,8 @@ groups:
         scored: false
 
       - id: 5.1.5
-        text: "Ensure that default service accounts are not actively used. (Automated)"
-        type: "skip"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
+        text: "Ensure that default service accounts are not actively used. (Manual)"
+        type: "manual"
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
diff --git a/cfg/rke-cis-1.24/node.yaml b/cfg/rke-cis-1.24/node.yaml
index ca5dcc1ad..653f1b754 100644
--- a/cfg/rke-cis-1.24/node.yaml
+++ b/cfg/rke-cis-1.24/node.yaml
@@ -446,7 +446,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/rke-cis-1.24/policies.yaml b/cfg/rke-cis-1.24/policies.yaml
index 362771a65..2b8dba3e1 100644
--- a/cfg/rke-cis-1.24/policies.yaml
+++ b/cfg/rke-cis-1.24/policies.yaml
@@ -44,14 +44,6 @@ groups:
 
       - id: 5.1.5
         text: "Ensure that default service accounts are not actively used. (Manual)"
         type: "manual"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
diff --git a/cfg/rke-cis-1.7/node.yaml b/cfg/rke-cis-1.7/node.yaml
index ff5731732..abc9d12af 100644
--- a/cfg/rke-cis-1.7/node.yaml
+++ b/cfg/rke-cis-1.7/node.yaml
@@ -436,7 +436,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/rke-cis-1.7/policies.yaml b/cfg/rke-cis-1.7/policies.yaml
index a543875c3..207dd7808 100644
--- a/cfg/rke-cis-1.7/policies.yaml
+++ b/cfg/rke-cis-1.7/policies.yaml
@@ -43,21 +43,12 @@ groups:
 
       - id: 5.1.5
         text: "Ensure that default service accounts are not actively used. (Manual)"
-        type: "skip"
-        audit: check_for_default_sa.sh
-        tests:
-          test_items:
-            - flag: "true"
-              compare:
-                op: eq
-                value: "true"
-              set: true
+        type: "manual"
         remediation: |
           Create explicit service accounts wherever a Kubernetes workload requires specific access
           to the Kubernetes API server.
           Modify the configuration of each default service account to include this value
           automountServiceAccountToken: false
-          Permissive - Kubernetes provides default service accounts to be used.
         scored: false
 
       - id: 5.1.6
diff --git a/cfg/rke2-cis-1.23/node.yaml b/cfg/rke2-cis-1.23/node.yaml
index bbb015f32..b7d97a0e3 100644
--- a/cfg/rke2-cis-1.23/node.yaml
+++ b/cfg/rke2-cis-1.23/node.yaml
@@ -453,7 +453,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/rke2-cis-1.24/master.yaml b/cfg/rke2-cis-1.24/master.yaml
index 13afa29ea..a11048d14 100644
--- a/cfg/rke2-cis-1.24/master.yaml
+++ b/cfg/rke2-cis-1.24/master.yaml
@@ -148,12 +148,18 @@ groups:
     - id: 1.1.10
       text: "Ensure that the Container Network Interface file ownership is set to root:root (Manual)"
       audit: |
-        ps -fC ${kubeletbin:-kubelet} | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed 's%.*cni-conf-dir[= ]\([^ ]*\).*%\1%' | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+        /bin/sh -c 'if test -e /etc/cni/net.d; then
+          ps -fC "${kubeletbin:-kubelet}" | grep -- --cni-conf-dir || echo "/etc/cni/net.d" | sed "s%.*cni-conf-dir[= ]\([^ ]*\).*%\1%" | xargs -I{} find {} -mindepth 1 | xargs --no-run-if-empty stat -c %U:%G
+          find /var/lib/cni/networks -type f 2> /dev/null | xargs --no-run-if-empty stat -c %U:%G
+        else
+          echo "File not found"
+        fi'
       use_multiple_values: true
       tests:
+        bin_op: or
        test_items:
          - flag: "root:root"
+         - flag: "File not found"
       remediation: |
         Run the below command (based on the file location on your system) on the control plane node.
         For example,
@@ -321,11 +327,18 @@ groups:
     - id: 1.1.21
       text: "Ensure that the Kubernetes PKI key file permissions are set to 600 (Manual)"
-      audit: "stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key"
+      audit: |
+        /bin/sh -c 'if ls /var/lib/rancher/rke2/server/tls/*.key > /dev/null 2>&1; then
+          stat -c permissions=%a /var/lib/rancher/rke2/server/tls/*.key
+        else
+          echo "File not found"
+        fi'
       use_multiple_values: true
       tests:
+        bin_op: or
        test_items:
          - flag: "permissions"
            compare:
              op: eq
              value: "600"
+         - flag: "File not found"
@@ -979,7 +992,7 @@ groups:
         Edit the Controller Manager pod specification file $controllermanagerconf
         on the control plane node and set the --feature-gates parameter to include RotateKubeletServerCertificate=true.
         --feature-gates=RotateKubeletServerCertificate=true
-      scored: true
+      scored: false
       type: skip
 
     - id: 1.3.7
diff --git a/cfg/rke2-cis-1.24/node.yaml b/cfg/rke2-cis-1.24/node.yaml
index b99703fc4..dcbe85793 100644
--- a/cfg/rke2-cis-1.24/node.yaml
+++ b/cfg/rke2-cis-1.24/node.yaml
@@ -440,7 +440,7 @@ groups:
           systemctl daemon-reload
           systemctl restart kubelet.service
         scored: false
-
+      type: skip
     - id: 4.2.13
       text: "Ensure that the Kubelet only makes use of Strong Cryptographic Ciphers (Manual)"
       audit: "/bin/ps -fC $kubeletbin"
@@ -453,7 +453,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/rke2-cis-1.7/node.yaml b/cfg/rke2-cis-1.7/node.yaml
index 155aef7d4..a390e39a5 100644
--- a/cfg/rke2-cis-1.7/node.yaml
+++ b/cfg/rke2-cis-1.7/node.yaml
@@ -432,7 +432,7 @@ groups:
               op: valid_elements
               value: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
       remediation: |
-        If using a Kubelet config file, edit the file to set `TLSCipherSuites` to
+        If using a Kubelet config file, edit the file to set `tlsCipherSuites` to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/cfg/tkgi-1.2.53/node.yaml b/cfg/tkgi-1.2.53/node.yaml
index 8e0f09591..9fafab944 100644
--- a/cfg/tkgi-1.2.53/node.yaml
+++ b/cfg/tkgi-1.2.53/node.yaml
@@ -405,7 +405,7 @@ groups:
               op: regex
               value: (TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256|TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384|TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305|TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_256_GCM_SHA384|TLS_RSA_WITH_AES_128_GCM_SHA256)
       remediation: |
-        If using a Kubelet config file, edit the file to set TLSCipherSuites: to
+        If using a Kubelet config file, edit the file to set tlsCipherSuites: to
         TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256
         or to a subset of these values.
         If using executable arguments, edit the kubelet service file
diff --git a/check/controls.go b/check/controls.go
index 15db8312b..171835058 100644
--- a/check/controls.go
+++ b/check/controls.go
@@ -252,7 +252,7 @@ func (controls *Controls) ASFF() ([]types.AwsSecurityFinding, error) {
 		f := types.AwsSecurityFinding{
 			AwsAccountId: aws.String(account),
-			Confidence:   *aws.Int32(100),
+			Confidence:   aws.Int32(100),
 			GeneratorId:  aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", arn, controls.Version, check.ID)),
 			Id:           id,
 			CreatedAt:    aws.String(tf),
diff --git a/check/controls_test.go b/check/controls_test.go
index c6d7f1c59..c2f6ab36e 100644
--- a/check/controls_test.go
+++ b/check/controls_test.go
@@ -407,7 +407,7 @@ func TestControls_ASFF(t *testing.T) {
 			want: []types.AwsSecurityFinding{
 				{
 					AwsAccountId: aws.String("foo account"),
-					Confidence:   *aws.Int32(100),
+					Confidence:   aws.Int32(100),
 					GeneratorId:  aws.String(fmt.Sprintf("%s/cis-kubernetes-benchmark/%s/%s", fmt.Sprintf(ARN, "somewhere"), "1", "check1id")),
 					Description:  aws.String("check1text"),
 					ProductArn:   aws.String(fmt.Sprintf(ARN, "somewhere")),
diff --git a/cmd/common_test.go b/cmd/common_test.go
index 3627c9865..53793a000 100644
--- a/cmd/common_test.go
+++ b/cmd/common_test.go
@@ -242,6 +242,9 @@ func TestMapToCISVersion(t *testing.T) {
 		{kubeVersion: "1.24", succeed: true, exp: "cis-1.24"},
 		{kubeVersion: "1.25", succeed: true, exp: "cis-1.7"},
 		{kubeVersion: "1.26", succeed: true, exp: "cis-1.8"},
+		{kubeVersion: "1.27", succeed: true, exp: "cis-1.9"},
+		{kubeVersion: "1.28", succeed: true, exp: "cis-1.9"},
+		{kubeVersion: "1.29", succeed: true, exp: "cis-1.9"},
 		{kubeVersion: "gke-1.2.0", succeed: true, exp: "gke-1.2.0"},
 		{kubeVersion: "ocp-3.10", succeed: true, exp: "rh-0.7"},
 		{kubeVersion: "ocp-3.11", succeed: true, exp: "rh-0.7"},
diff --git a/cmd/util.go b/cmd/util.go
index 95c0b2639..275de2326 100644
--- a/cmd/util.go
+++ b/cmd/util.go
@@ -494,6 +494,8 @@ func getPlatformBenchmarkVersion(platform Platform) string {
 		switch platform.Version {
 		case "1.15", "1.16", "1.17", "1.18", "1.19":
 			return "gke-1.0"
+		case "1.29", "1.30", "1.31":
+			return "gke-1.6.0"
 		default:
 			return "gke-1.2.0"
 		}
diff --git a/docs/architecture.md b/docs/architecture.md
index 09d081e30..c65978f0a 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -22,8 +22,10 @@ The following table shows the valid targets based on the CIS Benchmark version.
 | cis-1.24 | master, controlplane, node, etcd, policies |
 | cis-1.7 | master, controlplane, node, etcd, policies |
 | cis-1.8 | master, controlplane, node, etcd, policies |
+| cis-1.9 | master, controlplane, node, etcd, policies |
 | gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | gke-1.2.0 | controlplane, node, policies, managedservices |
+| gke-1.6.0 | controlplane, node, policies, managedservices |
 | eks-1.0.1 | controlplane, node, policies, managedservices |
 | eks-1.1.0 | controlplane, node, policies, managedservices |
 | eks-1.2.0 | controlplane, node, policies, managedservices |
diff --git a/docs/platforms.md b/docs/platforms.md
index 92b6c7d10..d6fbcf712 100644
--- a/docs/platforms.md
+++ b/docs/platforms.md
@@ -17,8 +17,10 @@ Some defined by other hardenening guides.
 | CIS | [1.24](https://workbench.cisecurity.org/benchmarks/10873) | cis-1.24 | 1.24 |
 | CIS | [1.7](https://workbench.cisecurity.org/benchmarks/11107) | cis-1.7 | 1.25 |
 | CIS | [1.8](https://workbench.cisecurity.org/benchmarks/12958) | cis-1.8 | 1.26 |
+| CIS | [1.9](https://workbench.cisecurity.org/benchmarks/16828) | cis-1.9 | 1.27-1.29 |
 | CIS | [GKE 1.0.0](https://workbench.cisecurity.org/benchmarks/4536) | gke-1.0 | GKE |
 | CIS | [GKE 1.2.0](https://workbench.cisecurity.org/benchmarks/7534) | gke-1.2.0 | GKE |
+| CIS | [GKE 1.6.0](https://workbench.cisecurity.org/benchmarks/16093) | gke-1.6.0 | GKE |
 | CIS | [EKS 1.0.1](https://workbench.cisecurity.org/benchmarks/6041) | eks-1.0.1 | EKS |
 | CIS | [EKS 1.1.0](https://workbench.cisecurity.org/benchmarks/6248) | eks-1.1.0 | EKS |
 | CIS | [EKS 1.2.0](https://workbench.cisecurity.org/benchmarks/9681) | eks-1.2.0 | EKS |
diff --git a/docs/running.md b/docs/running.md
index 0ee670ff8..c482a78b6 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -92,7 +92,7 @@ aws ecr create-repository --repository-name k8s/kube-bench --image-tag-mutabilit
 git clone https://github.com/aquasecurity/kube-bench.git
 cd kube-bench
 aws ecr get-login-password --region | docker login --username AWS --password-stdin .dkr.ecr..amazonaws.com
-docker build -t k8s/kube-bench .
+make build-docker IMAGE_NAME=k8s/kube-bench
 docker tag k8s/kube-bench:latest .dkr.ecr..amazonaws.com/k8s/kube-bench:latest
 docker push .dkr.ecr..amazonaws.com/k8s/kube-bench:latest
 ```
@@ -154,8 +154,9 @@ oc apply -f job.yaml
 | ------------- | ----------------------------------------------------------- |
 | gke-1.0 | master, controlplane, node, etcd, policies, managedservices |
 | gke-1.2.0 | master, controlplane, node, policies, managedservices |
+| gke-1.6.0 | master, controlplane, node, policies, managedservices |
 
-kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0` or `--benchmark gke-1.2.0` when you run the `kube-bench` command.
+kube-bench includes benchmarks for GKE. To run this you will need to specify `--benchmark gke-1.0`, `--benchmark gke-1.2.0` or `--benchmark gke-1.6.0` when you run the `kube-bench` command.
 
 To run the benchmark as a job in your GKE cluster apply the included `job-gke.yaml`.
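For orientation, a Job wrapping kube-bench generally has the shape sketched below. This is an illustrative skeleton under stated assumptions, not the shipped job-gke.yaml; the metadata name, image tag, and benchmark flag are example choices.

```yaml
# Illustrative skeleton of a kube-bench Job; prefer the job-gke.yaml shipped with the repo.
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench-gke                            # example name
spec:
  template:
    spec:
      hostPID: true                               # kube-bench inspects host processes
      containers:
        - name: kube-bench
          image: docker.io/aquasec/kube-bench:latest  # pin a released tag in practice
          command: ["kube-bench", "--benchmark", "gke-1.6.0"]
      restartPolicy: Never
```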
diff --git a/go.mod b/go.mod
index a6cda311f..c64df2534 100644
--- a/go.mod
+++ b/go.mod
@@ -1,55 +1,59 @@
 module github.com/aquasecurity/kube-bench
 
-go 1.21
+go 1.22.0
+
+toolchain go1.22.7
 
 require (
-	github.com/aws/aws-sdk-go-v2 v1.26.0
-	github.com/aws/aws-sdk-go-v2/config v1.27.4
-	github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1
+	github.com/aws/aws-sdk-go-v2 v1.32.3
+	github.com/aws/aws-sdk-go-v2/config v1.27.37
+	github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4
 	github.com/fatih/color v1.16.0
-	github.com/golang/glog v1.2.0
+	github.com/golang/glog v1.2.2
 	github.com/magiconair/properties v1.8.7
 	github.com/onsi/ginkgo v1.16.5
 	github.com/pkg/errors v0.9.1
-	github.com/spf13/cobra v1.8.0
+	github.com/spf13/cobra v1.8.1
 	github.com/spf13/viper v1.18.2
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	gopkg.in/yaml.v2 v2.4.0
-	gorm.io/driver/postgres v1.5.6
-	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
-	k8s.io/apimachinery v0.29.3
-	k8s.io/client-go v0.29.3
+	gorm.io/driver/postgres v1.5.9
+	gorm.io/gorm v1.25.12
+	k8s.io/apimachinery v0.31.2
+	k8s.io/client-go v0.31.2
 )
 
 require (
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.4 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 // indirect
-	github.com/aws/smithy-go v1.20.1 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.35 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 // indirect
+	github.com/aws/smithy-go v1.22.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
-	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-openapi/swag v0.22.4 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/uuid v1.4.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/pgx/v5 v5.5.4 // indirect
+	github.com/jackc/pgx/v5 v5.5.5 // indirect
 	github.com/jackc/puddle/v2 v2.2.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
@@ -70,29 +74,29 @@ require (
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.6.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/stretchr/objx v0.5.0 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
+	github.com/x448/float16 v0.8.4 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/multierr v1.9.0 // indirect
-	golang.org/x/crypto v0.17.0 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
 	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
-	golang.org/x/net v0.19.0 // indirect
-	golang.org/x/oauth2 v0.15.0 // indirect
-	golang.org/x/sync v0.5.0 // indirect
-	golang.org/x/sys v0.15.0 // indirect
-	golang.org/x/term v0.15.0 // indirect
-	golang.org/x/text v0.14.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/oauth2 v0.21.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/term v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.33.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.29.3 // indirect
-	k8s.io/klog/v2 v2.110.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
+	k8s.io/api v0.31.2 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
+	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 62f9abca2..751c6a510 100644
--- a/go.sum
+++ b/go.sum
@@ -1,36 +1,32 @@
-github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
-github.com/aws/aws-sdk-go-v2 v1.26.0 h1:/Ce4OCiM3EkpW7Y+xUnfAFpchU78K7/Ug01sZni9PgA=
-github.com/aws/aws-sdk-go-v2 v1.26.0/go.mod h1:35hUlJVYd+M++iLI3ALmVwMOyRYMmRqUXpTtRGW+K9I=
-github.com/aws/aws-sdk-go-v2/config v1.27.4 h1:AhfWb5ZwimdsYTgP7Od8E9L1u4sKmDW2ZVeLcf2O42M=
-github.com/aws/aws-sdk-go-v2/config v1.27.4/go.mod h1:zq2FFXK3A416kiukwpsd+rD4ny6JC7QSkp4QdN1Mp2g=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4 h1:h5Vztbd8qLppiPwX+y0Q6WiwMZgpd9keKe2EAENgAuI=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.4/go.mod h1:+30tpwrkOgvkJL1rUZuRLoxcJwtI/OkeBLYnHxJtVe0=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24/go.mod h1:gAuCezX/gob6BSMbItsSlMb6WZGV7K2+fWOvk8xBSto= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2 h1:5ffmXjPtwRExp1zc7gENLgCPyHFbhEPwVTkTiH9niSk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.2/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1 h1:+lpa31bGPPvgpZwUJ4ldKRCsPukzJ0PqoO5AQ9S79oQ= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.29.1/go.mod h1:vKGWzDG4Ytw3hgv/FvNy0HX/XEoJ6k/e7KAANzXWP8Y= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1 h1:3I2cBEYgKhrWlwyZgfpSO2BpaMY1LHPqXYk/QGlu2ew= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.1/go.mod h1:uQ7YYKZt3adCRrdCBREm1CD3efFLOUNH77MrUCvx5oA= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2/config v1.27.37 h1:xaoIwzHVuRWRHFI0jhgEdEGc8xE1l91KaeRDsWEIncU= +github.com/aws/aws-sdk-go-v2/config v1.27.37/go.mod h1:S2e3ax9/8KnMSyRVNd3sWTKs+1clJ2f1U6nE0lpvQRg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35 h1:7QknrZhYySEB1lEXJxGAmuD5sWwys5ZXNr4m5oEz0IE= +github.com/aws/aws-sdk-go-v2/credentials v1.17.35/go.mod h1:8Vy4kk7at4aPSmibr7K+nLTzG6qUQAUO4tW49fzUV4E= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14 h1:C/d03NAmh8C4BZXhuRNboF/DqhBkBCeDiJDcaqIT5pA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.14/go.mod h1:7I0Ju7p9mCIdlrfS+JCgqcYD0VXz/N4yozsox+0o078= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod 
h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5 h1:QFASJGfT8wMXtuP3D5CRmMjARHv9ZmzFUMJznHDOY3w= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.5/go.mod h1:QdZ3OmoIjSX+8D1OPAzPxDfjXASbBMDsz9qvtyIhtik= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20 h1:Xbwbmk44URTiHNx6PNo0ujDE6ERlsCKJD3u1zfnzAPg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.20/go.mod h1:oAfOFzUB14ltPZj1rWwRc3d/6OgD76R8KlvU3EqM9Fg= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4 h1:/dZV1aa+UyaP17M/gHQ6qHDEnvfHAF98CIXzerGQv9M= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.54.4/go.mod h1:3Aq0KVVKwxbRdEywQbgQLnVrimltVKejsW1fVMnK2Uc= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1 h1:2jrVsMHqdLD1+PA4BA6Nh1eZp0Gsy3mFSB5MxDvcJtU= +github.com/aws/aws-sdk-go-v2/service/sso v1.23.1/go.mod h1:XRlMvmad0ZNL+75C5FYdMvbbLkd6qiqz6foR1nA1PXY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1 h1:0L7yGCg3Hb3YQqnSgBTZM5wepougtL1aEccdcdYhHME= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.27.1/go.mod h1:FnvDM4sfa+isJ3kDXIzAB9GAwVSzFzSy97uZ3IsHo4E= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1 h1:8K0UNOkZiK9Uh3HIF6Bx0rcNCftqGCeKmOaR7Gp5BSo= +github.com/aws/aws-sdk-go-v2/service/sts v1.31.1/go.mod h1:yMWe0F+XG0DkRZK5ODZhG7BEFYhLXi2dqGsv6tX0cgI= +github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= +github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -46,23 +42,26 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -76,17 +75,16 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -96,16 +94,14 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8= -github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= +github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw= +github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -144,12 +140,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= -github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -157,8 +153,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= @@ -170,26 +166,30 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod 
h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= @@ -199,29 +199,28 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= -golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -233,15 +232,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -249,22 +247,20 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -284,25 +280,25 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gorm.io/driver/postgres v1.5.6 h1:ydr9xEd5YAM0vxVDY0X139dyzNz10spDiDlC7+ibLeU= -gorm.io/driver/postgres v1.5.6/go.mod h1:3e019WlBaYI5o5LIdNV+LyxCMNtLOQETBXL2h4chKpA= -gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde h1:9DShaph9qhkIYw7QF91I/ynrr4cOO2PZra2PFD7Mfeg= -gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= -k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8= +gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI= +gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8= +gorm.io/gorm 
v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/integration/testdata/Expected_output.data b/integration/testdata/Expected_output.data index d0cfe93fc..934f90ec6 100644 --- a/integration/testdata/Expected_output.data +++ b/integration/testdata/Expected_output.data @@ -281,7 +281,7 @@ Based on your system, restart the kubelet service. For example: systemctl daemon-reload systemctl restart kubelet.service -4.2.13 If using a Kubelet config file, edit the file to set TLSCipherSuites: to +4.2.13 If using a Kubelet config file, edit the file to set tlsCipherSuites: to TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256 or to a subset of these values. If using executable arguments, edit the kubelet service file diff --git a/internal/findings/publisher.go b/internal/findings/publisher.go index 1847a77fe..4f960afdb 100644 --- a/internal/findings/publisher.go +++ b/internal/findings/publisher.go @@ -54,11 +54,11 @@ func (p *Publisher) PublishFinding(finding []types.AwsSecurityFinding) (*Publish errs = errors.Wrap(err, "finding publish failed") } if r != nil { - if r.FailedCount != 0 { - o.FailedCount += r.FailedCount + if *r.FailedCount != 0 { + o.FailedCount += *r.FailedCount } - if r.SuccessCount != 0 { - o.SuccessCount += r.SuccessCount + if *r.SuccessCount != 0 { + o.SuccessCount += *r.SuccessCount } o.FailedFindings = append(o.FailedFindings, r.FailedFindings...) 
} diff --git a/job.yaml b/job.yaml index d8afd6562..1a22637ff 100644 --- a/job.yaml +++ b/job.yaml @@ -11,7 +11,7 @@ spec: spec: containers: - command: ["kube-bench"] - image: docker.io/aquasec/kube-bench:v0.7.3 + image: docker.io/aquasec/kube-bench:v0.9.1 name: kube-bench volumeMounts: - name: var-lib-cni diff --git a/makefile b/makefile index f9c91fc01..bcab5c3fa 100644 --- a/makefile +++ b/makefile @@ -11,6 +11,8 @@ uname := $(shell uname -s) BUILDX_PLATFORM ?= linux/amd64,linux/arm64,linux/arm,linux/ppc64le,linux/s390x DOCKER_ORGS ?= aquasec public.ecr.aws/aquasecurity GOARCH ?= $@ +KUBECTL_VERSION ?= 1.31.0 +ARCH ?= $(shell go env GOARCH) ifneq ($(findstring Microsoft,$(shell uname -r)),) BUILD_OS := windows @@ -45,15 +47,19 @@ build-fips: # builds the current dev docker version build-docker: docker build --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --build-arg VCS_REF=$(VERSION) \ - --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ - -t $(IMAGE_NAME) . + --build-arg VCS_REF=$(VERSION) \ + --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ + --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ + --build-arg TARGETARCH=$(ARCH) \ + -t $(IMAGE_NAME) . build-docker-ubi: docker build -f Dockerfile.ubi --build-arg BUILD_DATE=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --build-arg VCS_REF=$(VERSION) \ - --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ - -t $(IMAGE_NAME_UBI) . + --build-arg VCS_REF=$(VERSION) \ + --build-arg KUBEBENCH_VERSION=$(KUBEBENCH_VERSION) \ + --build-arg KUBECTL_VERSION=$(KUBECTL_VERSION) \ + --build-arg TARGETARCH=$(ARCH) \ + -t $(IMAGE_NAME_UBI) . # unit tests tests:
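Both new makefile variables ship with defaults (`KUBECTL_VERSION ?= 1.31.0`, `ARCH ?= $(shell go env GOARCH)`), so existing `make build-docker` invocations keep working unchanged; overriding them is only needed to pin a different kubectl release or to cross-build. A hedged usage sketch (the image tag is illustrative, and the final check assumes the image places kubectl on PATH):

```
# Build with the makefile defaults
make build-docker IMAGE_NAME=kube-bench:dev

# Pin kubectl and the target architecture explicitly
make build-docker IMAGE_NAME=kube-bench:dev KUBECTL_VERSION=1.31.0 ARCH=arm64

# Verify the bundled kubectl landed in the image (assumes kubectl is on PATH)
docker run --rm --entrypoint kubectl kube-bench:dev version --client
```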