diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index bc7253b02..718a3a619 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -53,6 +53,7 @@ jobs: - ^other$/^re[c-q] - ^other$/^res - ^other$/^[s-z] + - ^other-cel$/^[m-q] - ^pod-security$ - ^pod-security-cel$ - ^psa$ diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/chainsaw-test.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..a6646a301 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,39 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: memory-requests-equal-limits +spec: + steps: + - name: step-01 + try: + - apply: + file: ../memory-requests-equal-limits.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: memory-requests-equal-limits + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml + diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-bad.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..b5e8f35f5 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,77 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "200Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: 
badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "10Mi" + limits: + memory: "140Mi" + - name: busybox02 + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "150Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "120Mi" + limits: + memory: "120Mi" + - name: busybox02 + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "150Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: busybox02 + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "200Mi" + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "100Mi" + diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-good.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..2b67af375 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/pod-good.yaml @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod00 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "100Mi" +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "100Mi" + - name: busybox02 + image: busybox:1.35 + - name: busybox03 + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + limits: + memory: "50Mi" + diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-bad.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-bad.yaml new 
file mode 100644 index 000000000..85b8c5456 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "10Mi" + limits: + memory: "140Mi" + - name: busybox02 + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "150Mi" +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "10Mi" + limits: + memory: "140Mi" + - name: busybox02 + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "150Mi" + restartPolicy: OnFailure + diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-good.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..d54369b17 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "100Mi" + - name: busybox02 + image: busybox:1.35 + - name: busybox03 + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + limits: + memory: "50Mi" +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + 
jobTemplate: + spec: + template: + spec: + containers: + - name: busybox + image: busybox:1.35 + resources: + requests: + memory: "100Mi" + limits: + memory: "100Mi" + - name: busybox02 + image: busybox:1.35 + - name: busybox03 + image: busybox:1.35 + resources: + requests: + memory: "50Mi" + limits: + memory: "50Mi" + restartPolicy: OnFailure + diff --git a/other-cel/memory-requests-equal-limits/.chainsaw-test/policy-ready.yaml b/other-cel/memory-requests-equal-limits/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..b0e2885d6 --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: memory-requests-equal-limits +status: + ready: true + diff --git a/other-cel/memory-requests-equal-limits/.kyverno-test/kyverno-test.yaml b/other-cel/memory-requests-equal-limits/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..73538ab7d --- /dev/null +++ b/other-cel/memory-requests-equal-limits/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,28 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: memory-requests-equal-limits +policies: +- ../memory-requests-equal-limits.yaml +resources: +- resource.yaml +results: +- kind: CronJob + policy: memory-requests-equal-limits + resources: + - hello + result: pass + rule: autogen-cronjob-memory-requests-equal-limits +- kind: DaemonSet + policy: memory-requests-equal-limits + resources: + - fluentd-elasticsearch + result: pass + rule: autogen-memory-requests-equal-limits +- kind: Pod + policy: memory-requests-equal-limits + resources: + - myapp-pod + result: fail + rule: memory-requests-equal-limits + diff --git a/other-cel/memory-requests-equal-limits/.kyverno-test/resource.yaml b/other-cel/memory-requests-equal-limits/.kyverno-test/resource.yaml new file mode 100644 index 000000000..33b5da389 --- /dev/null +++ 
b/other-cel/memory-requests-equal-limits/.kyverno-test/resource.yaml @@ -0,0 +1,73 @@ +# DaemonSet with equal resources.requests.memory to resources.limits.memory +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd-elasticsearch + namespace: kube-system + labels: + k8s-app: fluentd-logging +spec: + selector: + matchLabels: + name: fluentd-elasticsearch + template: + metadata: + labels: + name: fluentd-elasticsearch + spec: + containers: + - name: fluentd-elasticsearch + image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + +--- +# Pod with unequal resources.requests.memory to resources.limits.memory +apiVersion: v1 +kind: Pod +metadata: + name: myapp-pod +spec: + containers: + - name: nginx + image: nginx + resources: + requests: + memory: "64Mi" + cpu: "250m" + limits: + memory: "128Mi" + cpu: "500m" + +--- +# CronJob with equal resources.requests.memory to resources.limits.memory +apiVersion: batch/v1 +kind: CronJob +metadata: + name: hello +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: hello + image: busybox + args: + - /bin/sh + - -c + - date; echo Hello from the Kubernetes cluster + resources: + requests: + cpu: "100m" + memory: "1000Mi" + limits: + cpu: "100m" + memory: "1000Mi" + restartPolicy: OnFailure + diff --git a/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml b/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml new file mode 100644 index 000000000..c50a6c04d --- /dev/null +++ b/other-cel/memory-requests-equal-limits/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: memory-requests-equal-limits-cel +version: 1.0.0 +displayName: Memory Requests Equal Limits in CEL expressions +description: >- + Pods which have memory limits equal to requests are given a QoS class of Guaranteed which is the highest schedulable class. 
This policy checks that all containers in a given Pod have memory requests equal to limits. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + Pods which have memory limits equal to requests are given a QoS class of Guaranteed which is the highest schedulable class. This policy checks that all containers in a given Pod have memory requests equal to limits. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 176dc9b492d3eee687bc89711d3414f13bf00548b85781e71ccaacd12bbf6f1a +createdAt: "2024-04-07T11:13:21Z" + diff --git a/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml b/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml new file mode 100644 index 000000000..82b23257b --- /dev/null +++ b/other-cel/memory-requests-equal-limits/memory-requests-equal-limits.yaml @@ -0,0 +1,38 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: memory-requests-equal-limits + annotations: + policies.kyverno.io/title: Memory Requests Equal Limits in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Pods which have memory limits equal to requests could be given a QoS class of Guaranteed if + they also set CPU limits equal to requests. Guaranteed is the highest schedulable class. + This policy checks that all containers in a given Pod have memory requests equal to limits. 
+spec: + validationFailureAction: Audit + background: false + rules: + - name: memory-requests-equal-limits + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + variables: + - name: containersWithResources + expression: object.spec.containers.filter(container, has(container.resources)) + expressions: + - expression: >- + variables.containersWithResources.all(container, + !has(container.resources.requests) || + !has(container.resources.requests.memory) || + container.resources.requests.memory == container.resources.?limits.?memory.orValue('-1')) + message: "resources.requests.memory must be equal to resources.limits.memory" + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/chainsaw-test.yaml b/other-cel/metadata-match-regex/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..51f02a908 --- /dev/null +++ b/other-cel/metadata-match-regex/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,39 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: metadata-match-regex +spec: + steps: + - name: step-01 + try: + - apply: + file: ../metadata-match-regex.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: metadata-match-regex + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/pod-bad.yaml b/other-cel/metadata-match-regex/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..e16e81cd7 --- /dev/null +++ 
b/other-cel/metadata-match-regex/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + corp.org/version: v1.1 + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + corp.org/version: "0.0.1" + name: badpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + corp.org/version: "v1.22.1" + name: badpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod04 +spec: + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/pod-good.yaml b/other-cel/metadata-match-regex/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..9c3f19d3e --- /dev/null +++ b/other-cel/metadata-match-regex/.chainsaw-test/pod-good.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + corp.org/version: v0.1.9 + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + corp.org/version: v0.0.1 + name: goodpod02 +spec: + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-bad.yaml b/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..e1824fb48 --- /dev/null +++ b/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + corp.org/version: "v0.12.9" + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + 
jobTemplate: + spec: + template: + metadata: + labels: + corp.org/version: "v1.13" + spec: + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-good.yaml b/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..164d85d51 --- /dev/null +++ b/other-cel/metadata-match-regex/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + corp.org/version: "v0.1.9" + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + corp.org/version: "v0.1.9" + spec: + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure + diff --git a/other-cel/metadata-match-regex/.chainsaw-test/policy-ready.yaml b/other-cel/metadata-match-regex/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..aa0042784 --- /dev/null +++ b/other-cel/metadata-match-regex/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: metadata-match-regex +status: + ready: true + diff --git a/other-cel/metadata-match-regex/.kyverno-test/kyverno-test.yaml b/other-cel/metadata-match-regex/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c7259a539 --- /dev/null +++ b/other-cel/metadata-match-regex/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,53 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: metadata-match-regex +policies: +- ../metadata-match-regex.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- 
../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: metadata-match-regex + rule: check-for-regex + kind: Pod + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: Deployment + resources: + - baddeployment01 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: CronJob + resources: + - badcronjob01 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: Pod + resources: + - goodpod01 + - goodpod02 + result: pass +- policy: metadata-match-regex + rule: check-for-regex + kind: Deployment + resources: + - gooddeployment01 + result: pass +- policy: metadata-match-regex + rule: check-for-regex + kind: CronJob + resources: + - goodcronjob01 + result: pass + diff --git a/other-cel/metadata-match-regex/artifacthub-pkg.yml b/other-cel/metadata-match-regex/artifacthub-pkg.yml new file mode 100644 index 000000000..c4c2212d4 --- /dev/null +++ b/other-cel/metadata-match-regex/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: metadata-match-regex-cel +version: 1.0.0 +displayName: Metadata Matches Regex in CEL expressions +description: >- + Rather than a simple check to see if given metadata such as labels and annotations are present, in some cases they need to be present and the values match a specified regular expression. This policy illustrates how to ensure a label with key `corp.org/version` is both present and matches a given regex, in this case ensuring semver is met. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/metadata-match-regex/metadata-match-regex.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Rather than a simple check to see if given metadata such as labels and annotations are present, in some cases they need to be present and the values match a specified regular expression. 
This policy illustrates how to ensure a label with key `corp.org/version` is both present and matches a given regex, in this case ensuring semver is met. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod, Label" +digest: 4f6e2a07df41b3ce83af7ce25a6cdb7bae14f336edfd178bb52b25183f6c580d +createdAt: "2024-04-07T10:16:14Z" + diff --git a/other-cel/metadata-match-regex/metadata-match-regex.yaml b/other-cel/metadata-match-regex/metadata-match-regex.yaml new file mode 100644 index 000000000..be0c59c02 --- /dev/null +++ b/other-cel/metadata-match-regex/metadata-match-regex.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: metadata-match-regex + annotations: + policies.kyverno.io/title: Metadata Matches Regex in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod, Label + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + Rather than a simple check to see if given metadata such as labels and annotations are present, + in some cases they need to be present and the values match a specified regular expression. This + policy illustrates how to ensure a label with key `corp.org/version` is both present and matches + a given regex, in this case ensuring semver is met. 
+spec: + validationFailureAction: Audit + background: false + rules: + - name: check-for-regex + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + expressions: + - expression: >- + has(object.metadata.labels) && 'corp.org/version' in object.metadata.labels && + object.metadata.labels['corp.org/version'].matches('^v[0-9].[0-9].[0-9]$') + message: >- + The label `corp.org/version` is required and must match the specified regex: ^v[0-9].[0-9].[0-9]$ + diff --git a/other-cel/pdb-maxunavailable/.chainsaw-test/chainsaw-test.yaml b/other-cel/pdb-maxunavailable/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..84811c9fc --- /dev/null +++ b/other-cel/pdb-maxunavailable/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: pdb-maxunavailable +spec: + steps: + - name: step-01 + try: + - apply: + file: ../pdb-maxunavailable.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: pdb-maxunavailable + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pdb-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pdb-bad.yaml + diff --git a/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-bad.yaml b/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-bad.yaml new file mode 100644 index 000000000..48788b23e --- /dev/null +++ b/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-bad.yaml @@ -0,0 +1,7 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: badpdb01 +spec: + maxUnavailable: 0 + diff --git a/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-good.yaml b/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-good.yaml new file mode 100644 index 000000000..d8c6a9e30 --- /dev/null +++ 
b/other-cel/pdb-maxunavailable/.chainsaw-test/pdb-good.yaml @@ -0,0 +1,14 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: goodpdb01 +spec: + minAvailable: 1 +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: goodpdb02 +spec: + maxUnavailable: 1 + diff --git a/other-cel/pdb-maxunavailable/.chainsaw-test/policy-ready.yaml b/other-cel/pdb-maxunavailable/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..235426002 --- /dev/null +++ b/other-cel/pdb-maxunavailable/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: pdb-maxunavailable +status: + ready: true + diff --git a/other-cel/pdb-maxunavailable/.kyverno-test/kyverno-test.yaml b/other-cel/pdb-maxunavailable/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..e62099f4d --- /dev/null +++ b/other-cel/pdb-maxunavailable/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,24 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: pdb-maxunavailable +policies: +- ../pdb-maxunavailable.yaml +resources: +- resource.yaml +results: +- kind: PodDisruptionBudget + policy: pdb-maxunavailable + resources: + - kube-system/bad-pdb-zero + - kube-system/bad-pdb-negative-one + result: fail + rule: pdb-maxunavailable +- kind: PodDisruptionBudget + policy: pdb-maxunavailable + resources: + - kube-system/good-pdb + - kube-system/good-pdb-none + result: pass + rule: pdb-maxunavailable + diff --git a/other-cel/pdb-maxunavailable/.kyverno-test/resource.yaml b/other-cel/pdb-maxunavailable/.kyverno-test/resource.yaml new file mode 100644 index 000000000..d7777edb9 --- /dev/null +++ b/other-cel/pdb-maxunavailable/.kyverno-test/resource.yaml @@ -0,0 +1,43 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: good-pdb + namespace: kube-system +spec: + maxUnavailable: 2 + selector: + matchLabels: + app: good +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + 
name: good-pdb-none + namespace: kube-system +spec: + selector: + matchLabels: + app: good +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: bad-pdb-zero + namespace: kube-system +spec: + maxUnavailable: 0 + selector: + matchLabels: + app: bad +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: bad-pdb-negative-one + namespace: kube-system +spec: + maxUnavailable: -1 + selector: + matchLabels: + app: bad + diff --git a/other-cel/pdb-maxunavailable/artifacthub-pkg.yml b/other-cel/pdb-maxunavailable/artifacthub-pkg.yml new file mode 100644 index 000000000..efc26a695 --- /dev/null +++ b/other-cel/pdb-maxunavailable/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: pdb-maxunavailable-cel +version: 1.0.0 +displayName: PodDisruptionBudget maxUnavailable Non-Zero in CEL expressions +description: >- + A PodDisruptionBudget which sets its maxUnavailable value to zero prevents all voluntary evictions including Node drains which may impact maintenance tasks. This policy enforces that if a PodDisruptionBudget specifies the maxUnavailable field it must be greater than zero. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + A PodDisruptionBudget which sets its maxUnavailable value to zero prevents all voluntary evictions including Node drains which may impact maintenance tasks. This policy enforces that if a PodDisruptionBudget specifies the maxUnavailable field it must be greater than zero. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "PodDisruptionBudget" +digest: 7dff4f3801bce1ca8835c5ebcadaa78e1fa41480a19958eb78aee5bbfcd6b8bf +createdAt: "2024-04-07T10:22:03Z" + diff --git a/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml b/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml new file mode 100644 index 000000000..4c2da59ae --- /dev/null +++ b/other-cel/pdb-maxunavailable/pdb-maxunavailable.yaml @@ -0,0 +1,31 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: pdb-maxunavailable + annotations: + policies.kyverno.io/title: PodDisruptionBudget maxUnavailable Non-Zero in CEL expressions + policies.kyverno.io/category: Other in CEL + kyverno.io/kyverno-version: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: PodDisruptionBudget + policies.kyverno.io/description: >- + A PodDisruptionBudget which sets its maxUnavailable value to zero prevents + all voluntary evictions including Node drains which may impact maintenance tasks. + This policy enforces that if a PodDisruptionBudget specifies the maxUnavailable field + it must be greater than zero. +spec: + validationFailureAction: Audit + background: false + rules: + - name: pdb-maxunavailable + match: + any: + - resources: + kinds: + - PodDisruptionBudget + validate: + cel: + expressions: + - expression: "!has(object.spec.maxUnavailable) || int(object.spec.maxUnavailable) > 0" + message: "The value of maxUnavailable must be greater than zero." 
+ diff --git a/other-cel/prevent-bare-pods/.chainsaw-test/chainsaw-test.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..71881a8c8 --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,47 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-naked-pods +spec: + steps: + - name: step-01 + try: + - apply: + file: ns.yaml + - apply: + file: ../prevent-bare-pods.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: prevent-bare-pods + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + file: deployment.yaml + - name: step-03 + try: + - sleep: + duration: 5s + - script: + content: | + if [ $(kubectl get pods -n prevent-naked-pods-ns | grep gooddeployment01 | wc -l) -gt 0 ]; then exit 0; else exit 1; fi + - name: step-99 + try: + - script: + content: kubectl delete all --all --force --grace-period=0 -n prevent-naked-pods-ns + diff --git a/other-cel/prevent-bare-pods/.chainsaw-test/deployment.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/deployment.yaml new file mode 100644 index 000000000..c79a4845a --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/deployment.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 + namespace: prevent-naked-pods-ns +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: busybox + image: busybox:1.35 + command: ["sleep", "30"] + diff --git 
a/other-cel/prevent-bare-pods/.chainsaw-test/ns.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/ns.yaml new file mode 100644 index 000000000..ed9eca697 --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/ns.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: prevent-naked-pods-ns + diff --git a/other-cel/prevent-bare-pods/.chainsaw-test/pod-bad.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..cf952a0ce --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/prevent-bare-pods/.chainsaw-test/pod-good.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..7bd6e6c77 --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/pod-good.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + ownerReferences: + - apiVersion: apps/v1 + kind: Deployment + name: gooddeployment01 + uid: "foo-bar" + name: goodpod01 +spec: + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/prevent-bare-pods/.chainsaw-test/policy-ready.yaml b/other-cel/prevent-bare-pods/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..c88574c2e --- /dev/null +++ b/other-cel/prevent-bare-pods/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-bare-pods +status: + ready: true + diff --git a/other-cel/prevent-bare-pods/.kyverno-test/kyverno-test.yaml b/other-cel/prevent-bare-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..1a43a336b --- /dev/null +++ b/other-cel/prevent-bare-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-bare-pods +policies: +- ../prevent-bare-pods.yaml 
+resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +results: +- policy: prevent-bare-pods + rule: bare-pods + kind: Pod + resources: + - badpod01 + result: fail +- policy: prevent-bare-pods + rule: bare-pods + kind: Pod + resources: + - goodpod01 + result: pass + diff --git a/other-cel/prevent-bare-pods/artifacthub-pkg.yml b/other-cel/prevent-bare-pods/artifacthub-pkg.yml new file mode 100644 index 000000000..3917c8200 --- /dev/null +++ b/other-cel/prevent-bare-pods/artifacthub-pkg.yml @@ -0,0 +1,25 @@ +name: prevent-bare-pods-cel +version: 1.0.0 +displayName: Prevent bare Pods in CEL expressions +description: >- + Pods not created by workload controllers such as Deployments have no self-healing or scaling abilities and are unsuitable for production. This policy prevents such "bare" Pods from being created unless they originate from a higher-level workload controller of some sort. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/prevent-bare-pods/prevent-bare-pods.yaml + ``` +keywords: + - kyverno + - Other + - EKS Best Practices + - CEL Expressions +readme: | + Pods not created by workload controllers such as Deployments have no self-healing or scaling abilities and are unsuitable for production. This policy prevents such "bare" Pods from being created unless they originate from a higher-level workload controller of some sort. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other, EKS Best Practices in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 325e1a068bd771c60a304f121675b9d895bcc8abacc7b48054ae4465d51fd158 +createdAt: "2024-04-07T10:47:32Z" + diff --git a/other-cel/prevent-bare-pods/prevent-bare-pods.yaml b/other-cel/prevent-bare-pods/prevent-bare-pods.yaml new file mode 100644 index 000000000..2afe850c7 --- /dev/null +++ b/other-cel/prevent-bare-pods/prevent-bare-pods.yaml @@ -0,0 +1,34 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-bare-pods + annotations: + policies.kyverno.io/title: Prevent Bare Pods in CEL expressions + pod-policies.kyverno.io/autogen-controllers: none + policies.kyverno.io/category: Other, EKS Best Practices in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Pods not created by workload controllers such as Deployments + have no self-healing or scaling abilities and are unsuitable for production. + This policy prevents such "bare" Pods from being created unless they originate + from a higher-level workload controller of some sort. +spec: + validationFailureAction: Audit + background: true + rules: + - name: bare-pods + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + expressions: + - expression: "'ownerReferences' in object.metadata" + message: "Bare Pods are not allowed. They must be created by Pod controllers." 
+ diff --git a/other-cel/prevent-cr8escape/.chainsaw-test/chainsaw-test.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..7816e6083 --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,31 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: prevent-cr8escape +spec: + steps: + - name: step-01 + try: + - apply: + file: ../prevent-cr8escape.yaml + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pods-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pods-bad.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml + diff --git a/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-bad.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..26556887a --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + securityContext: + sysctls: + - name: "bar" + value: "foo" + - name: "foo" + value: "foo=bar" + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + securityContext: + sysctls: + - name: "foo" + value: "foo+bar" + - name: "bar" + value: "foo" + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure + diff --git 
a/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-good.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..487b91d99 --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + securityContext: + sysctls: + - name: "foo" + value: "bar" + - name: "bar" + value: "foo" + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + securityContext: + sysctls: + - name: "foo" + value: "bar" + - name: "bar" + value: "foo" + containers: + - name: busybox + image: busybox:1.35 + restartPolicy: OnFailure + diff --git a/other-cel/prevent-cr8escape/.chainsaw-test/pods-bad.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/pods-bad.yaml new file mode 100644 index 000000000..05825db5a --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/pods-bad.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + securityContext: + sysctls: + - name: "foo" + value: "foo+bar" + - name: "bar" + value: "foo" + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod02 +spec: + securityContext: + sysctls: + - name: "bar" + value: "foo" + - name: "foo" + value: "foo=bar" + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/prevent-cr8escape/.chainsaw-test/pods-good.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/pods-good.yaml new file mode 100644 index 000000000..ed0f0c5fa --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/pods-good.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: Pod 
+metadata: + name: goodpod01 +spec: + securityContext: + sysctls: + - name: "foo" + value: "bar" + - name: "bar" + value: "foo" + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + securityContext: + allowPrivilegeEscalation: false + containers: + - name: busybox + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod03 +spec: + containers: + - name: busybox + image: busybox:1.35 + diff --git a/other-cel/prevent-cr8escape/.chainsaw-test/policy-ready.yaml b/other-cel/prevent-cr8escape/.chainsaw-test/policy-ready.yaml new file mode 100644 index 000000000..1259408f7 --- /dev/null +++ b/other-cel/prevent-cr8escape/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,10 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-cr8escape +status: + conditions: + - reason: Succeeded + status: "True" + type: Ready + diff --git a/other-cel/prevent-cr8escape/.kyverno-test/kyverno-test.yaml b/other-cel/prevent-cr8escape/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..29a5cc8c5 --- /dev/null +++ b/other-cel/prevent-cr8escape/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-cr8escape +policies: +- ../prevent-cr8escape.yaml +resources: +- resources.yaml +results: +- kind: Pod + policy: prevent-cr8escape + resources: + - badpod01 + result: fail + rule: restrict-sysctls-cr8escape +- kind: Pod + policy: prevent-cr8escape + resources: + - pod-sysctl-good + - pod-no-sysctl + result: pass + rule: restrict-sysctls-cr8escape + diff --git a/other-cel/prevent-cr8escape/.kyverno-test/resources.yaml b/other-cel/prevent-cr8escape/.kyverno-test/resources.yaml new file mode 100644 index 000000000..e5ae26766 --- /dev/null +++ b/other-cel/prevent-cr8escape/.kyverno-test/resources.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + securityContext: + 
sysctls: + - name: kernel.shm_rmid_forced + value: "1+kernel.core_pattern=|/var/lib/containers/storage/overlay/3ef1281bce79865599f673b476957be73f994d17c15109d2b6a426711cf753e6/diff/malicious.sh #" + containers: + - name: alpine + image: alpine:latest + command: ["tail", "-f", "/dev/null"] +--- +apiVersion: v1 +kind: Pod +metadata: + name: pod-no-sysctl +spec: + containers: + - name: alpine + image: alpine:latest + command: ["tail", "-f", "/dev/null"] +--- +apiVersion: v1 +kind: Pod +metadata: + name: pod-sysctl-good +spec: + securityContext: + sysctls: + - name: kernel.shm_rmid_forced + value: "2" + containers: + - name: alpine + image: alpine:latest + command: ["tail", "-f", "/dev/null"] + diff --git a/other-cel/prevent-cr8escape/artifacthub-pkg.yml b/other-cel/prevent-cr8escape/artifacthub-pkg.yml new file mode 100644 index 000000000..afc130c3e --- /dev/null +++ b/other-cel/prevent-cr8escape/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: prevent-cr8escape-cel +version: 1.0.0 +displayName: Prevent cr8escape (CVE-2022-0811) in CEL expressions +description: >- + A vulnerability "cr8escape" (CVE-2022-0811) in CRI-O the container runtime engine underpinning Kubernetes allows attackers to escape from a Kubernetes container and gain root access to the host. The recommended remediation is to disallow sysctl settings with + or = in their value. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/prevent-cr8escape/prevent-cr8escape.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + A vulnerability "cr8escape" (CVE-2022-0811) in CRI-O the container runtime engine underpinning Kubernetes allows attackers to escape from a Kubernetes container and gain root access to the host. The recommended remediation is to disallow sysctl settings with + or = in their value. 
+ + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 84a0f441ce5baec6060606a05f2f7f54847e79b48a38c9edc1655e6f0caf8bbf +createdAt: "2024-04-08T10:46:02Z" + diff --git a/other-cel/prevent-cr8escape/prevent-cr8escape.yaml b/other-cel/prevent-cr8escape/prevent-cr8escape.yaml new file mode 100644 index 000000000..dfa8e918e --- /dev/null +++ b/other-cel/prevent-cr8escape/prevent-cr8escape.yaml @@ -0,0 +1,35 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: prevent-cr8escape + annotations: + policies.kyverno.io/title: Prevent cr8escape (CVE-2022-0811) in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: high + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + A vulnerability "cr8escape" (CVE-2022-0811) in CRI-O the container runtime engine + underpinning Kubernetes allows attackers to escape from a Kubernetes container + and gain root access to the host. The recommended remediation is to disallow + sysctl settings with + or = in their value. 
+spec: + validationFailureAction: Enforce + background: true + rules: + - name: restrict-sysctls-cr8escape + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + expressions: + - expression: >- + !has(object.spec.securityContext) || !has(object.spec.securityContext.sysctls) || + object.spec.securityContext.sysctls.all(sysctl, !has(sysctl.value) || (!sysctl.value.contains('+') && !sysctl.value.contains('='))) + message: "characters '+' or '=' are not allowed in sysctls values" + diff --git a/other/metadata-match-regex/.kyverno-test/kyverno-test.yaml b/other/metadata-match-regex/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..9e2f2c1f8 --- /dev/null +++ b/other/metadata-match-regex/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,46 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: metadata-match-regex +policies: +- ../metadata-match-regex.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +- ../.chainsaw-test/podcontroller-bad.yaml +- ../.chainsaw-test/podcontroller-good.yaml +results: +- policy: metadata-match-regex + rule: check-for-regex + kind: Pod + resources: + - badpod01 + - badpod02 + - badpod03 + - badpod04 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: Deployment + resources: + - baddeployment01 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: CronJob + resources: + - badcronjob01 + result: fail +- policy: metadata-match-regex + rule: check-for-regex + kind: Deployment + resources: + - gooddeployment01 + result: pass +- policy: metadata-match-regex + rule: check-for-regex + kind: CronJob + resources: + - goodcronjob01 + result: pass + diff --git a/other/prevent-bare-pods/.chainsaw-test/pod-good.yaml b/other/prevent-bare-pods/.chainsaw-test/pod-good.yaml index 649c25ce4..f7e01fe38 100644 --- a/other/prevent-bare-pods/.chainsaw-test/pod-good.yaml +++ b/other/prevent-bare-pods/.chainsaw-test/pod-good.yaml @@ -6,7 
+6,7 @@ metadata: kind: Deployment name: gooddeployment01 uid: "foo-bar" - name: godpod01 + name: goodpod01 spec: containers: - name: busybox diff --git a/other/prevent-bare-pods/.kyverno-test/kyverno-test.yaml b/other/prevent-bare-pods/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..1a43a336b --- /dev/null +++ b/other/prevent-bare-pods/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,23 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: prevent-bare-pods +policies: +- ../prevent-bare-pods.yaml +resources: +- ../.chainsaw-test/pod-bad.yaml +- ../.chainsaw-test/pod-good.yaml +results: +- policy: prevent-bare-pods + rule: bare-pods + kind: Pod + resources: + - badpod01 + result: fail +- policy: prevent-bare-pods + rule: bare-pods + kind: Pod + resources: + - goodpod01 + result: pass + diff --git a/other/prevent-cr8escape/.kyverno-test/kyverno-test.yaml b/other/prevent-cr8escape/.kyverno-test/kyverno-test.yaml index e9dae26dc..b9bb93bba 100644 --- a/other/prevent-cr8escape/.kyverno-test/kyverno-test.yaml +++ b/other/prevent-cr8escape/.kyverno-test/kyverno-test.yaml @@ -1,7 +1,7 @@ apiVersion: cli.kyverno.io/v1alpha1 kind: Test metadata: - name: restrict- + name: prevent-cr8escape policies: - ../prevent-cr8escape.yaml resources: