diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index d07a3f019..0e806fa5d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -47,15 +47,16 @@ jobs:
           - ^nginx-ingress$
           - ^openshift$
           - ^other$/^a
+          - ^other-cel$/^a
           - ^other$/^[b-d]
+          - ^other-cel$/^[b-d]
           - ^other$/^[e-l]
+          - ^other-cel$/^[e-l]
           - ^other$/^[m-q]
+          - ^other-cel$/^[m-q]
           - ^other$/^re[c-q]
           - ^other$/^res
           - ^other$/^[s-z]
-          - ^other-cel$/^a
-          - ^other-cel$/^[b-d]
-          - ^other-cel$/^[m-q]
           - ^pod-security$
           - ^pod-security-cel$
           - ^psa$
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/chainsaw-test.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/chainsaw-test.yaml
new file mode 100755
index 000000000..18c018e87
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/chainsaw-test.yaml
@@ -0,0 +1,39 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: enforce-pod-duration
+spec:
+  steps:
+  - name: step-01
+    try:
+    - apply:
+        file: ../enforce-pod-duration.yaml
+    - patch:
+        resource:
+          apiVersion: kyverno.io/v1
+          kind: ClusterPolicy
+          metadata:
+            name: pod-lifetime
+          spec:
+            validationFailureAction: Enforce
+    - assert:
+        file: policy-ready.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: pods-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: pods-bad.yaml
+    - apply:
+        file: podcontrollers-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: podcontrollers-bad.yaml
+
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-bad.yaml
new file mode 100644
index 000000000..7668856fd
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-bad.yaml
@@ -0,0 +1,43 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      annotations:
+        pod.kubernetes.io/lifetime: "8h5m"
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: badcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          annotations:
+            pod.kubernetes.io/lifetime: "8h5m"
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+          restartPolicy: OnFailure
+
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-good.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-good.yaml
new file mode 100644
index 000000000..8cf26dc07
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/podcontrollers-good.yaml
@@ -0,0 +1,82 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment02
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      annotations:
+        pod.kubernetes.io/lifetime: "8h"
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+          restartPolicy: OnFailure
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob02
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          annotations:
+            pod.kubernetes.io/lifetime: "5m"
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+          restartPolicy: OnFailure
+
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/pods-bad.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/pods-bad.yaml
new file mode 100644
index 000000000..bca56ed61
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/pods-bad.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    foo: bar
+    pod.kubernetes.io/lifetime: "8h1m"
+  name: badpod01
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    pod.kubernetes.io/lifetime: "24h"
+    foo: bar
+  name: badpod02
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/pods-good.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/pods-good.yaml
new file mode 100644
index 000000000..8e5741b96
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/pods-good.yaml
@@ -0,0 +1,44 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod01
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    foo: bar
+  name: goodpod02
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    foo: bar
+    pod.kubernetes.io/lifetime: "5m"
+  name: goodpod03
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  annotations:
+    pod.kubernetes.io/lifetime: "8h"
+    foo: bar
+  name: goodpod04
+spec:
+  containers:
+  - image: busybox:1.35
+    name: busybox
+
diff --git a/other-cel/enforce-pod-duration/.chainsaw-test/policy-ready.yaml b/other-cel/enforce-pod-duration/.chainsaw-test/policy-ready.yaml
new file mode 100755
index 000000000..2530f09c7
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: pod-lifetime
+status:
+  ready: true
+
diff --git a/other-cel/enforce-pod-duration/.kyverno-test/kyverno-test.yaml b/other-cel/enforce-pod-duration/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..da3c01fd4
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,22 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: pod-lifetime
+policies:
+- ../enforce-pod-duration.yaml
+resources:
+- resources.yaml
+results:
+- kind: Pod
+  policy: pod-lifetime
+  resources:
+  - test-lifetime-fail
+  result: fail
+  rule: pods-lifetime
+- kind: Pod
+  policy: pod-lifetime
+  resources:
+  - test-lifetime-pass
+  result: pass
+  rule: pods-lifetime
+
diff --git a/other-cel/enforce-pod-duration/.kyverno-test/resources.yaml b/other-cel/enforce-pod-duration/.kyverno-test/resources.yaml
new file mode 100644
index 000000000..b40ef2969
--- /dev/null
+++ b/other-cel/enforce-pod-duration/.kyverno-test/resources.yaml
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-lifetime-pass
+  namespace: test
+  annotations:
+    pod.kubernetes.io/lifetime: 4h
+spec:
+  containers:
+  - name: nginx
+    image: nginx:1.12
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-lifetime-fail
+  namespace: test
+  annotations:
+    pod.kubernetes.io/lifetime: 24h
+spec:
+  containers:
+  - name: nginx
+    image: nginx:1.12
+
diff --git a/other-cel/enforce-pod-duration/artifacthub-pkg.yml b/other-cel/enforce-pod-duration/artifacthub-pkg.yml
new file mode 100644
index 000000000..c2c68faf6
--- /dev/null
+++ b/other-cel/enforce-pod-duration/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: enforce-pod-duration-cel
+version: 1.0.0
+displayName: Enforce pod duration in CEL expressions
+description: >-
+  This validation is valuable when annotations are used to define durations, such as to ensure a Pod lifetime annotation does not exceed some site-specific max threshold. The Pod lifetime annotation can be no greater than 8 hours.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/enforce-pod-duration/enforce-pod-duration.yaml
+  ```
+keywords:
+  - kyverno
+  - Sample
+  - CEL Expressions
+readme: |
+  This validation is valuable when annotations are used to define durations, such as to ensure a Pod lifetime annotation does not exceed some site-specific max threshold. The Pod lifetime annotation can be no greater than 8 hours.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Sample in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: b2f1fec7c8b199024c813b1ddb3d52f27f889d082c0c94f4824c499cd6b278bb
+createdAt: "2024-03-30T18:18:11Z"
+
diff --git a/other-cel/enforce-pod-duration/enforce-pod-duration.yaml b/other-cel/enforce-pod-duration/enforce-pod-duration.yaml
new file mode 100644
index 000000000..48ccc0fb7
--- /dev/null
+++ b/other-cel/enforce-pod-duration/enforce-pod-duration.yaml
@@ -0,0 +1,35 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: pod-lifetime
+  annotations:
+    policies.kyverno.io/title: Enforce pod duration in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      This validation is valuable when annotations are used to define durations,
+      such as to ensure a Pod lifetime annotation does not exceed some site-specific max threshold.
+      The Pod lifetime annotation can be no greater than 8 hours.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: pods-lifetime
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    validate:
+      cel:
+        variables:
+        - name: hasLifetimeAnnotation
+          expression: "has(object.metadata.annotations) && 'pod.kubernetes.io/lifetime' in object.metadata.annotations"
+        - name: lifetimeAnnotationValue
+          expression: "variables.hasLifetimeAnnotation ? object.metadata.annotations['pod.kubernetes.io/lifetime'] : '0s'"
+        expressions:
+        - expression: "!(duration(variables.lifetimeAnnotationValue) > duration('8h'))"
+          message: "Pod lifetime exceeds limit of 8h"
+
diff --git a/other-cel/enforce-readwriteonce-pod/.kyverno-test/kyverno-test.yaml b/other-cel/enforce-readwriteonce-pod/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..31c6b838a
--- /dev/null
+++ b/other-cel/enforce-readwriteonce-pod/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,22 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: enforce-readwriteonce-pod
+policies:
+- ../enforce-readwriteonce-pod.yaml
+resources:
+- resource.yaml
+results:
+- kind: PersistentVolumeClaim
+  policy: readwriteonce-pod
+  resources:
+  - badpvc
+  result: fail
+  rule: readwrite-pvc-single-pod
+- kind: PersistentVolumeClaim
+  policy: readwriteonce-pod
+  resources:
+  - goodpvc
+  result: pass
+  rule: readwrite-pvc-single-pod
+
diff --git a/other-cel/enforce-readwriteonce-pod/.kyverno-test/resource.yaml b/other-cel/enforce-readwriteonce-pod/.kyverno-test/resource.yaml
new file mode 100644
index 000000000..df1206b98
--- /dev/null
+++ b/other-cel/enforce-readwriteonce-pod/.kyverno-test/resource.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: badpvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: goodpvc
+spec:
+  accessModes:
+  - ReadWriteOncePod
+  resources:
+    requests:
+      storage: 1Gi
+
diff --git a/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml b/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml
new file mode 100644
index 000000000..520a5ac06
--- /dev/null
+++ b/other-cel/enforce-readwriteonce-pod/artifacthub-pkg.yml
@@ -0,0 +1,34 @@
+name: enforce-readwriteonce-pod-cel
+version: 1.0.0
+displayName: Enforce readwriteoncepod in CEL expressions
+description: >-
+  Some stateful workloads with multiple replicas only allow a single Pod to write
+  to a given volume at a time. Beginning in Kubernetes 1.22 and enabled by default
+  in 1.27, a new setting called ReadWriteOncePod, available
+  for CSI volumes only, allows volumes to be writable from only a single Pod. For more
+  information see the blog https://kubernetes.io/blog/2023/04/20/read-write-once-pod-access-mode-beta/.
+  This policy enforces that the accessModes for a PersistentVolumeClaim be set to ReadWriteOncePod.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml
+  ```
+keywords:
+  - kyverno
+  - Sample
+  - CEL Expressions
+readme: |
+  Some stateful workloads with multiple replicas only allow a single Pod to write
+  to a given volume at a time. Beginning in Kubernetes 1.22 and enabled by default
+  in 1.27, a new setting called ReadWriteOncePod, available
+  for CSI volumes only, allows volumes to be writable from only a single Pod. For more
+  information see the blog https://kubernetes.io/blog/2023/04/20/read-write-once-pod-access-mode-beta/.
+  This policy enforces that the accessModes for a PersistentVolumeClaim be set to ReadWriteOncePod.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Sample in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "PersistentVolumeClaims"
+digest: de7662c3394731c2de9205ebdda2da9da69e8022b616ca6e4ea9dbfd8ad2b2a8
+createdAt: "2024-03-31T10:53:27Z"
+
diff --git a/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml b/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml
new file mode 100644
index 000000000..5c5720e4d
--- /dev/null
+++ b/other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml
@@ -0,0 +1,33 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: readwriteonce-pod
+  annotations:
+    policies.kyverno.io/title: Enforce ReadWriteOncePod in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/subject: PersistentVolumeClaim
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.27-1.28"
+    policies.kyverno.io/description: >-
+      Some stateful workloads with multiple replicas only allow a single Pod to write
+      to a given volume at a time. Beginning in Kubernetes 1.22 and enabled by default
+      in 1.27, a new setting called ReadWriteOncePod, available
+      for CSI volumes only, allows volumes to be writable from only a single Pod. For more
+      information see the blog https://kubernetes.io/blog/2023/04/20/read-write-once-pod-access-mode-beta/.
+      This policy enforces that the accessModes for a PersistentVolumeClaim be set to ReadWriteOncePod.
+spec:
+  validationFailureAction: Audit
+  background: true
+  rules:
+  - name: readwrite-pvc-single-pod
+    match:
+      any:
+      - resources:
+          kinds:
+          - PersistentVolumeClaim
+    validate:
+      cel:
+        expressions:
+        - expression: "'ReadWriteOncePod' in object.spec.accessModes"
+          message: "The accessMode must be set to ReadWriteOncePod."
+
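The rule above is a single set-membership test, `'ReadWriteOncePod' in object.spec.accessModes`. As a quick local sanity check, the policy can be evaluated against the bundled test PVCs with the Kyverno CLI — a minimal sketch, assuming the CLI is installed and the command is run from the repository root:

```shell
# Evaluate the new policy against the CLI test resources above;
# badpvc should be reported as failing, goodpvc as passing.
kyverno apply other-cel/enforce-readwriteonce-pod/enforce-readwriteonce-pod.yaml \
  --resource other-cel/enforce-readwriteonce-pod/.kyverno-test/resource.yaml
```
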
diff --git a/other-cel/ensure-probes-different/.chainsaw-test/chainsaw-test.yaml b/other-cel/ensure-probes-different/.chainsaw-test/chainsaw-test.yaml
new file mode 100755
index 000000000..119c554b3
--- /dev/null
+++ b/other-cel/ensure-probes-different/.chainsaw-test/chainsaw-test.yaml
@@ -0,0 +1,38 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: ensure-probes-different
+spec:
+  steps:
+  - name: step-01
+    try:
+    - apply:
+        file: ns.yaml
+    - apply:
+        file: ../ensure-probes-different.yaml
+    - patch:
+        resource:
+          apiVersion: kyverno.io/v1
+          kind: ClusterPolicy
+          metadata:
+            name: validate-probes
+          spec:
+            validationFailureAction: Enforce
+    - assert:
+        file: policy-ready.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: podcontrollers-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: podcontrollers-bad.yaml
+  - name: step-99
+    try:
+    - script:
+        content: kubectl delete all --all --force --grace-period=0 -n ensure-probes-different-ns
+
diff --git a/other-cel/ensure-probes-different/.chainsaw-test/ns.yaml b/other-cel/ensure-probes-different/.chainsaw-test/ns.yaml
new file mode 100644
index 000000000..055f17f7e
--- /dev/null
+++ b/other-cel/ensure-probes-different/.chainsaw-test/ns.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ensure-probes-different-ns
+
diff --git a/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-bad.yaml
new file mode 100644
index 000000000..4042ab8a8
--- /dev/null
+++ b/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-bad.yaml
@@ -0,0 +1,116 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment01
+  namespace: ensure-probes-different-ns
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+      - image: busybox:1.35
+        name: busybox02
+        command: ["sleep","300"]
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: badds01
+  namespace: ensure-probes-different-ns
+spec:
+  selector:
+    matchLabels:
+      name: busybox
+  template:
+    metadata:
+      labels:
+        name: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        command: ["sleep","300"]
+        livenessProbe:
+          tcpSocket:
+            port: 8080
+          periodSeconds: 10
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+          periodSeconds: 10
+      - image: busybox:1.35
+        name: busybox02
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: badss01
+  namespace: ensure-probes-different-ns
+spec:
+  selector:
+    matchLabels:
+      app: busybox
+  serviceName: "busybox"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - image: busybox:1.35
+        name: busybox02
+        command: ["sleep","300"]
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+          periodSeconds: 10
+        readinessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+          periodSeconds: 10
+
diff --git a/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-good.yaml b/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-good.yaml
new file mode 100644
index 000000000..b82e3c264
--- /dev/null
+++ b/other-cel/ensure-probes-different/.chainsaw-test/podcontrollers-good.yaml
@@ -0,0 +1,114 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment01
+  namespace: ensure-probes-different-ns
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        command: ["sleep","300"]
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+      - image: busybox:1.35
+        name: busybox02
+        command: ["sleep","300"]
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+          periodSeconds: 10
+---
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: goodds01
+  namespace: ensure-probes-different-ns
+spec:
+  selector:
+    matchLabels:
+      name: busybox
+  template:
+    metadata:
+      labels:
+        name: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        command: ["sleep","300"]
+        readinessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+      - image: busybox:1.35
+        name: busybox02
+        command: ["sleep","300"]
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: goodss01
+  namespace: ensure-probes-different-ns
+spec:
+  selector:
+    matchLabels:
+      app: busybox
+  serviceName: "busybox"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      terminationGracePeriodSeconds: 5
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        livenessProbe:
+          tcpSocket:
+            port: 8080
+          periodSeconds: 10
+      - image: busybox:1.35
+        name: busybox02
+        command: ["sleep","300"]
+        livenessProbe:
+          exec:
+            command:
+            - cat
+            - /tmp/healthy
+          periodSeconds: 10
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+          periodSeconds: 10
+
diff --git a/other-cel/ensure-probes-different/.chainsaw-test/policy-ready.yaml b/other-cel/ensure-probes-different/.chainsaw-test/policy-ready.yaml
new file mode 100755
index 000000000..1a741c0b9
--- /dev/null
+++ b/other-cel/ensure-probes-different/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: validate-probes
+status:
+  ready: true
+
diff --git a/other-cel/ensure-probes-different/.kyverno-test/kyverno-test.yaml b/other-cel/ensure-probes-different/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..6812ebacc
--- /dev/null
+++ b/other-cel/ensure-probes-different/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,22 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: validate-probes
+policies:
+- ../ensure-probes-different.yaml
+resources:
+- resource.yaml
+results:
+- kind: Deployment
+  policy: validate-probes
+  resources:
+  - mydeploy-2
+  result: fail
+  rule: validate-probes
+- kind: Deployment
+  policy: validate-probes
+  resources:
+  - mydeploy-1
+  result: pass
+  rule: validate-probes
+
diff --git a/other-cel/ensure-probes-different/.kyverno-test/resource.yaml b/other-cel/ensure-probes-different/.kyverno-test/resource.yaml
new file mode 100644
index 000000000..6f524a756
--- /dev/null
+++ b/other-cel/ensure-probes-different/.kyverno-test/resource.yaml
@@ -0,0 +1,67 @@
+# "Liveness and readiness probes are not the same."
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    name: mydeploy-1
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: goproxy
+  template:
+    metadata:
+      name: goproxy
+      labels:
+        app: goproxy
+    spec:
+      containers:
+      - name: goproxy
+        image: registry.k8s.io/goproxy:0.1
+        ports:
+        - containerPort: 8080
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+          initialDelaySeconds: 5
+          periodSeconds: 10
+        livenessProbe:
+          tcpSocket:
+            port: 8080
+          initialDelaySeconds: 15
+          periodSeconds: 20
+
+---
+# "Liveness and readiness probes are the same."
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    name: mydeploy-2
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: goproxy
+  template:
+    metadata:
+      name: goproxy
+      labels:
+        app: goproxy
+    spec:
+      containers:
+      - name: goproxy
+        image: registry.k8s.io/goproxy:0.1
+        ports:
+        - containerPort: 8080
+        readinessProbe:
+          tcpSocket:
+            port: 8080
+          initialDelaySeconds: 5
+          periodSeconds: 10
+        livenessProbe:
+          tcpSocket:
+            port: 8080
+          initialDelaySeconds: 5
+          periodSeconds: 10
+
diff --git a/other-cel/ensure-probes-different/artifacthub-pkg.yml b/other-cel/ensure-probes-different/artifacthub-pkg.yml
new file mode 100644
index 000000000..f90d1059f
--- /dev/null
+++ b/other-cel/ensure-probes-different/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: ensure-probes-different-cel
+version: 1.0.0
+displayName: Validate Probes in CEL expressions
+description: >-
+  Liveness and readiness probes accomplish different goals, and setting both to the same is an anti-pattern and often results in app problems in the future. This policy checks that liveness and readiness probes are not equal.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/ensure-probes-different/ensure-probes-different.yaml
+  ```
+keywords:
+  - kyverno
+  - Sample
+  - CEL Expressions
+readme: |
+  Liveness and readiness probes accomplish different goals, and setting both to the same is an anti-pattern and often results in app problems in the future. This policy checks that liveness and readiness probes are not equal.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Sample in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: cbafa29e49ec48f7378157f69fa77a53c07fb40dc4c542738a8f31331689f5be
+createdAt: "2024-03-31T11:12:02Z"
+
diff --git a/other-cel/ensure-probes-different/ensure-probes-different.yaml b/other-cel/ensure-probes-different/ensure-probes-different.yaml
new file mode 100644
index 000000000..241ac246d
--- /dev/null
+++ b/other-cel/ensure-probes-different/ensure-probes-different.yaml
@@ -0,0 +1,38 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: validate-probes
+  annotations:
+    pod-policies.kyverno.io/autogen-controllers: none
+    policies.kyverno.io/title: Validate Probes in CEL expressions
+    policies.kyverno.io/category: Sample in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Liveness and readiness probes accomplish different goals, and setting both to the same
+      is an anti-pattern and often results in app problems in the future. This policy
+      checks that liveness and readiness probes are not equal. Keep in mind that if both
+      probes are not set, they are considered to be equal and hence fail the check.
+spec:
+  validationFailureAction: Audit
+  background: false
+  rules:
+  - name: validate-probes
+    match:
+      any:
+      - resources:
+          kinds:
+          - Deployment
+          - DaemonSet
+          - StatefulSet
+    validate:
+      cel:
+        expressions:
+        - expression: >-
+            !object.spec.template.spec.containers.exists(container,
+            has(container.readinessProbe) && has(container.livenessProbe) &&
+            container.readinessProbe == container.livenessProbe)
+          message: "Liveness and readiness probes cannot be the same."
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-02.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-02.yaml
new file mode 100644
index 000000000..be990147d
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-02.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pod-02
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: false
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-03.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-03.yaml
new file mode 100644
index 000000000..351cf44bc
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-03.yaml
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pod-03
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+    - mountPath: /etc
+      name: bar
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+  - name: bar
+    hostPath:
+      path: /etc
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-04.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-04.yaml
new file mode 100644
index 000000000..4b0703656
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-04.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pod-04
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+    - mountPath: /etc
+      name: bar
+      readOnly: false
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+  - name: bar
+    hostPath:
+      path: /etc
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-05.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-05.yaml
new file mode 100644
index 000000000..9f803e8ec
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pod-05.yaml
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pod-05
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+    - mountPath: /etc
+      name: bar
+  - name: test-webserver02
+    image: sjbonmqopcta:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+  - name: bar
+    hostPath:
+      path: /etc
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pods-all.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pods-all.yaml
new file mode 100644
index 000000000..8783a36ea
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/bad-pods-all.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pods-all
+spec:
+  ephemeralContainers:
+  - name: ephemtest-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: false
+  initContainers:
+  - name: inittest-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: bar
+      readOnly: true
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  - name: test-webserver02
+    image: sjbonmqopcta:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: bar
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+  - name: bar
+    hostPath:
+      path: /etc
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/chainsaw-test.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/chainsaw-test.yaml
new file mode 100755
index 000000000..d6e330389
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/chainsaw-test.yaml
@@ -0,0 +1,66 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: ensure-readonly-hostpath
+spec:
+  steps:
+  - name: step-01
+    try:
+    - apply:
+        file: ../ensure-readonly-hostpath.yaml
+    - patch:
+        resource:
+          apiVersion: kyverno.io/v1
+          kind: ClusterPolicy
+          metadata:
+            name: ensure-readonly-hostpath
+          spec:
+            validationFailureAction: Enforce
+    - assert:
+        file: policy-ready.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: pods-good.yaml
+    - apply:
+        file: ../.kyverno-test/good-pod-01.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: pods-bad.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: ../.kyverno-test/bad-pod-01.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: bad-pod-02.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: bad-pod-03.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: bad-pod-04.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: bad-pod-05.yaml
+    - apply:
+        file: podcontrollers-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: podcontrollers-bad.yaml
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/good-pods-all.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/good-pods-all.yaml
new file mode 100644
index 000000000..ebc36fa0b
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/good-pods-all.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good-pods-all
+spec:
+  ephemeralContainers:
+  - name: ephemtest-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  initContainers:
+  - name: inittest-webserver
+    image: fjtyonaq:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  - name: test-webserver02
+    image: sjbonmqopcta:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-bad.yaml
new file mode 100644
index 000000000..3ecab0318
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-bad.yaml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      initContainers:
+      - name: busybox-init
+        image: busybox:1.35
+        volumeMounts:
+        - mountPath: /some/dir
+          name: foo
+          readOnly: true
+        - mountPath: /some/another/dir
+          name: foo
+          readOnly: false
+      containers:
+      - name: busybox
+        image: busybox:1.35
+        volumeMounts:
+        - mountPath: /some/dir
+          name: foo
+          readOnly: true
+      volumes:
+      - name: foo
+        hostPath:
+          path: /var/log
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: badcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          initContainers:
+          - name: busybox-init
+            image: busybox:1.35
+            volumeMounts:
+            - mountPath: /some/dir
+              name: foo
+              readOnly: true
+            - mountPath: /some/another/dir
+              name: foo
+              readOnly: false
+          containers:
+          - name: busybox
+            image: busybox:1.35
+            volumeMounts:
+            - mountPath: /some/dir
+              name: foo
+              readOnly: true
+          volumes:
+          - name: foo
+            hostPath:
+              path: /var/log
+          restartPolicy: OnFailure
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-good.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-good.yaml
new file mode 100644
index 000000000..d7d6eaac7
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/podcontrollers-good.yaml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      initContainers:
+      - name: busybox-init
+        image: busybox:1.35
+        volumeMounts:
+        - mountPath: /some/dir
+          name: foo
+          readOnly: true
+        - mountPath: /some/another/dir
+          name: foo
+          readOnly: true
+      containers:
+      - name: busybox
+        image: busybox:1.35
+        volumeMounts:
+        - mountPath: /some/dir
+          name: foo
+          readOnly: true
+      volumes:
+      - name: foo
+        hostPath:
+          path: /var/log
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          initContainers:
+          - name: busybox-init
+            image: busybox:1.35
+            volumeMounts:
+            - mountPath: /some/dir
+              name: foo
+              readOnly: true
+            - mountPath: /some/another/dir
+              name: foo
+              readOnly: true
+          containers:
+          - name: busybox
+            image: busybox:1.35
+            volumeMounts:
+            - mountPath: /some/dir
+              name: foo
+              readOnly: true
+          volumes:
+          - name: foo
+            hostPath:
+              path: /var/log
+          restartPolicy: OnFailure
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-bad.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-bad.yaml
new file mode 100644
index 000000000..b11e3fc29
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-bad.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod01
+spec:
+  initContainers:
+  - name: busybox-init
+    image: busybox:1.35
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+    - mountPath: /some/another/dir
+      name: foo
+      readOnly: false
+  containers:
+  - name: busybox
+    image: busybox:1.35
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-good.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-good.yaml
new file mode 100644
index 000000000..15a373855
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/pods-good.yaml
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod01
+spec:
+  initContainers:
+  - name: busybox-init
+    image: busybox:1.35
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+    - mountPath: /some/another/dir
+      name: foo
+      readOnly: true
+  containers:
+  - name: busybox
+    image: busybox:1.35
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.chainsaw-test/policy-ready.yaml b/other-cel/ensure-readonly-hostpath/.chainsaw-test/policy-ready.yaml
new file mode 100755
index 000000000..b9b225ce6
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: ensure-readonly-hostpath
+status:
+  ready: true
+
diff --git a/other-cel/ensure-readonly-hostpath/.kyverno-test/bad-pod-01.yaml b/other-cel/ensure-readonly-hostpath/.kyverno-test/bad-pod-01.yaml
new file mode 100644
index 000000000..51839206c
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.kyverno-test/bad-pod-01.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad-pod-01
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.kyverno-test/good-pod-01.yaml b/other-cel/ensure-readonly-hostpath/.kyverno-test/good-pod-01.yaml
new file mode 100644
index 000000000..9c6331dcf
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.kyverno-test/good-pod-01.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good-pod-01
+spec:
+  containers:
+  - name: test-webserver
+    image: asdfeasdfasada:latest
+    volumeMounts:
+    - mountPath: /some/dir
+      name: foo
+      readOnly: true
+  volumes:
+  - name: foo
+    hostPath:
+      path: /var/log
+
diff --git a/other-cel/ensure-readonly-hostpath/.kyverno-test/kyverno-test.yaml b/other-cel/ensure-readonly-hostpath/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..e37b5e0c3
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,23 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: ensure-readonly-hostpath
+policies:
+- ../ensure-readonly-hostpath.yaml
+resources:
+- good-pod-01.yaml
+- bad-pod-01.yaml
+results:
+- kind: Pod
+  policy: ensure-readonly-hostpath
+  resources:
+  - bad-pod-01
+  result: fail
+  rule: ensure-hostpaths-readonly
+- kind: Pod
+  policy: ensure-readonly-hostpath
+  resources:
+  - good-pod-01
+  result: pass
+  rule: ensure-hostpaths-readonly
+
diff --git a/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml b/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml
new file mode 100644
index 000000000..1c09131ab
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: ensure-readonly-hostpath-cel
+version: 1.0.0
+displayName: Ensure Read Only hostPath in CEL expressions
+description: >-
+  Pods which are allowed to mount hostPath volumes in read/write mode pose a security risk even if confined to a "safe" file system on the host and may escape those confines (see https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts). The only true way to ensure safety is to enforce that all Pods mounting hostPath volumes do so in read only mode. This policy checks all containers for any hostPath volumes and ensures they are explicitly mounted in readOnly mode.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml
+  ```
+keywords:
+  - kyverno
+  - Other
+  - CEL Expressions
+readme: |
+  Pods which are allowed to mount hostPath volumes in read/write mode pose a security risk even if confined to a "safe" file system on the host and may escape those confines (see https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts). The only true way to ensure safety is to enforce that all Pods mounting hostPath volumes do so in read only mode. This policy checks all containers for any hostPath volumes and ensures they are explicitly mounted in readOnly mode.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Other in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: 5335b84399ed1bb06e70489940d2555cff0c97f7f937aac0fbdf8ee0a188ace1
+createdAt: "2024-04-05T17:39:16Z"
+
diff --git a/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml b/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml
new file mode 100644
index 000000000..ea97b78be
--- /dev/null
+++ b/other-cel/ensure-readonly-hostpath/ensure-readonly-hostpath.yaml
@@ -0,0 +1,43 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: ensure-readonly-hostpath
+  annotations:
+    policies.kyverno.io/title: Ensure Read Only hostPath in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/severity: medium
+    policies.kyverno.io/minversion: 1.11.0
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      Pods which are allowed to mount hostPath volumes in read/write mode pose a security risk
+      even if confined to a "safe" file system on the host and may escape those confines (see
+      https://blog.aquasec.com/kubernetes-security-pod-escape-log-mounts). The only true way
+      to ensure safety is to enforce that all Pods mounting hostPath volumes do so in read only
+      mode. This policy checks all containers for any hostPath volumes and ensures they are
+      explicitly mounted in readOnly mode.
+spec:
+  background: false
+  validationFailureAction: Audit
+  rules:
+  - name: ensure-hostpaths-readonly
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    validate:
+      cel:
+        variables:
+        - name: allContainers
+          expression: "(object.spec.containers + (has(object.spec.initContainers) ? object.spec.initContainers : []) + (has(object.spec.ephemeralContainers) ? object.spec.ephemeralContainers : []))"
+        - name: hostPathVolumes
+          expression: "has(object.spec.volumes) ? object.spec.volumes.filter(volume, has(volume.hostPath)) : []"
+        expressions:
+        - expression: >-
+            variables.hostPathVolumes.all(hostPath, variables.allContainers.all(container,
+            !has(container.volumeMounts) ||
+            container.volumeMounts.all(volume, (hostPath.name != volume.name) || has(volume.readOnly) && volume.readOnly == true)))
+          message: All hostPath volumes must be mounted as readOnly.
+
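Because `readOnly` defaults to false when omitted, the expression must require the field to be explicitly present and true. An annotated reading of the rule's CEL expression above (the `//` comments are editorial, not part of the policy):

```
// For every hostPath volume declared on the Pod...
variables.hostPathVolumes.all(hostPath,
  // ...every container (regular, init, and ephemeral alike)...
  variables.allContainers.all(container,
    // ...either mounts no volumes at all, or
    !has(container.volumeMounts) ||
    container.volumeMounts.all(volume,
      // ...each of its mounts refers to some other volume, or
      (hostPath.name != volume.name) ||
      // ...mounts this hostPath volume explicitly as readOnly.
      has(volume.readOnly) && volume.readOnly == true)))
```
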
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/chainsaw-test.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/chainsaw-test.yaml
new file mode 100755
index 000000000..5025169a3
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/chainsaw-test.yaml
@@ -0,0 +1,39 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: forbid-cpu-limits
+spec:
+  steps:
+  - name: step-01
+    try:
+    - apply:
+        file: ../forbid-cpu-limits.yaml
+    - patch:
+        resource:
+          apiVersion: kyverno.io/v1
+          kind: ClusterPolicy
+          metadata:
+            name: forbid-cpu-limits
+          spec:
+            validationFailureAction: Enforce
+    - assert:
+        file: policy-ready.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: pods-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: pods-bad.yaml
+    - apply:
+        file: podcontrollers-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: podcontrollers-bad.yaml
+
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-bad.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-bad.yaml
new file mode 100644
index 000000000..30cd26d52
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-bad.yaml
@@ -0,0 +1,100 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        resources:
+          limits:
+            cpu: 10m
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment02
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: webserver1
+        image: busybox:1.35
+        resources:
+          requests:
+            cpu: 10m
+      - name: webserver2
+        image: busybox:1.35
+        resources:
+          limits:
+            cpu: 10m
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: badcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+            resources:
+              limits:
+                cpu: 10m
+          restartPolicy: OnFailure
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: badcronjob02
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - name: webserver1
+            image: busybox:1.35
+            resources:
+              requests:
+                cpu: 10m
+          - name: webserver2
+            image: busybox:1.35
+            resources:
+              limits:
+                cpu: 10m
+          restartPolicy: OnFailure
+
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-good.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-good.yaml
new file mode 100644
index 000000000..03c668ef2
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/podcontrollers-good.yaml
@@ -0,0 +1,84 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment02
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - image: busybox:1.35
+        name: busybox
+        resources:
+          requests:
+            cpu: 10m
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+          restartPolicy: OnFailure
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob02
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        metadata:
+          labels:
+            app: busybox
+        spec:
+          containers:
+          - image: busybox:1.35
+            name: busybox
+            resources:
+              requests:
+                cpu: 10m
+          restartPolicy: OnFailure
+
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/pods-bad.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/pods-bad.yaml
new file mode 100644
index 000000000..f24adfe86
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/pods-bad.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad01
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      limits:
+        cpu: 10m
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad02
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      requests:
+        cpu: 10m
+  - name: webserver2
+    image: busybox:1.35
+    resources:
+      limits:
+        cpu: 10m
+
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/pods-good.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/pods-good.yaml
new file mode 100644
index 000000000..97629fef4
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/pods-good.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good01
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good02
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      requests:
+        cpu: 10m
+
diff --git a/other-cel/forbid-cpu-limits/.chainsaw-test/policy-ready.yaml b/other-cel/forbid-cpu-limits/.chainsaw-test/policy-ready.yaml
new file mode 100755
index 000000000..2babcc6e6
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: forbid-cpu-limits
+status:
+  ready: true
+
diff --git a/other-cel/forbid-cpu-limits/.kyverno-test/kyverno-test.yaml b/other-cel/forbid-cpu-limits/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..7e3a2c5ef
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,24 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: forbid-cpu-limits
+policies:
+- ../forbid-cpu-limits.yaml
+resources:
+- resource.yaml
+results:
+- kind: Pod
+  policy: forbid-cpu-limits
+  resources:
+  - bad01
+  - bad02
+  result: fail
+  rule: check-cpu-limits
+- kind: Pod
+  policy: forbid-cpu-limits
+  resources:
+  - good01
+  - good02
+  result: pass
+  rule: check-cpu-limits
+
diff --git a/other-cel/forbid-cpu-limits/.kyverno-test/resource.yaml b/other-cel/forbid-cpu-limits/.kyverno-test/resource.yaml
new file mode 100644
index 000000000..f29885f30
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/.kyverno-test/resource.yaml
@@ -0,0 +1,50 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad01
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      limits:
+        cpu: 10m
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: bad02
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      requests:
+        cpu: 10m
+  - name: webserver2
+    image: busybox:1.35
+    resources:
+      limits:
+        cpu: 10m
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good01
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: good02
+spec:
+  containers:
+  - name: webserver1
+    image: busybox:1.35
+    resources:
+      requests:
+        cpu: 10m
+
diff --git a/other-cel/forbid-cpu-limits/artifacthub-pkg.yml b/other-cel/forbid-cpu-limits/artifacthub-pkg.yml
new file mode 100644
index 000000000..3e86cb948
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/artifacthub-pkg.yml
@@ -0,0 +1,24 @@
+name: forbid-cpu-limits-cel
+version: 1.0.0
+displayName: Forbid CPU Limits in CEL expressions
+description: >-
+  Setting CPU limits is a debatably poor practice as it can result, when defined, in potentially starving applications of much-needed CPU cycles even when they are available. Ensuring that CPU limits are not set may ensure apps run more effectively. This policy forbids any container in a Pod from defining CPU limits.
+install: |-
+  ```shell
+  kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml
+  ```
+keywords:
+  - kyverno
+  - Other
+  - CEL Expressions
+readme: |
+  Setting CPU limits is a debatably poor practice as it can result, when defined, in potentially starving applications of much-needed CPU cycles even when they are available. Ensuring that CPU limits are not set may ensure apps run more effectively. This policy forbids any container in a Pod from defining CPU limits.
+
+  Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/
+annotations:
+  kyverno/category: "Other in CEL"
+  kyverno/kubernetesVersion: "1.26-1.27"
+  kyverno/subject: "Pod"
+digest: 2865e5f92968f90e090aff597937ab7db3e3e5939c32cb84c84f881970dedae6
+createdAt: "2024-04-01T15:35:47Z"
+
diff --git a/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml b/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml
new file mode 100644
index 000000000..b94b55756
--- /dev/null
+++ b/other-cel/forbid-cpu-limits/forbid-cpu-limits.yaml
@@ -0,0 +1,32 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: forbid-cpu-limits
+  annotations:
+    policies.kyverno.io/title: Forbid CPU Limits in CEL expressions
+    policies.kyverno.io/category: Other in CEL
+    policies.kyverno.io/subject: Pod
+    kyverno.io/kyverno-version: 1.11.0
+    kyverno.io/kubernetes-version: "1.26-1.27"
+    policies.kyverno.io/description: >-
+      Setting CPU limits is a debatably poor practice as it can result, when defined, in potentially starving
+      applications of much-needed CPU cycles even when they are available. Ensuring that CPU limits are not
+      set may ensure apps run more effectively. This policy forbids any container in a Pod from defining CPU limits.
+spec:
+  background: true
+  validationFailureAction: Audit
+  rules:
+  - name: check-cpu-limits
+    match:
+      any:
+      - resources:
+          kinds:
+          - Pod
+    validate:
+      cel:
+        expressions:
+        - expression: >-
+            !object.spec.containers.exists(container,
+            has(container.resources) && has(container.resources.limits) && has(container.resources.limits.cpu))
+          message: Containers may not define CPU limits.
+
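Note that the expression walks only `object.spec.containers`; the Deployment and CronJob cases exercised in the chainsaw tests above are covered by Kyverno's Pod auto-generation for pod controllers rather than by this expression directly. The bundled CLI tests can also be run locally — a sketch, assuming the Kyverno CLI is installed and the command is run from the repository root:

```shell
# Runs the .kyverno-test/kyverno-test.yaml declared for this policy.
kyverno test other-cel/forbid-cpu-limits
```
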
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/chainsaw-test.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/chainsaw-test.yaml
new file mode 100755
index 000000000..aadc8a25e
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/chainsaw-test.yaml
@@ -0,0 +1,39 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  creationTimestamp: null
+  name: imagepullpolicy-always
+spec:
+  steps:
+  - name: step-01
+    try:
+    - apply:
+        file: ../imagepullpolicy-always.yaml
+    - patch:
+        resource:
+          apiVersion: kyverno.io/v1
+          kind: ClusterPolicy
+          metadata:
+            name: imagepullpolicy-always
+          spec:
+            validationFailureAction: Enforce
+    - assert:
+        file: policy-ready.yaml
+  - name: step-02
+    try:
+    - apply:
+        file: pod-good.yaml
+    - apply:
+        file: podcontroller-good.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: pod-bad.yaml
+    - apply:
+        expect:
+        - check:
+            ($error != null): true
+        file: podcontroller-bad.yaml
+
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/pod-bad.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/pod-bad.yaml
new file mode 100644
index 000000000..6949ea364
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/pod-bad.yaml
@@ -0,0 +1,45 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod01
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest
+    imagePullPolicy: Never
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod02
+spec:
+  containers:
+  - name: pod01
+    image: busybox
+    imagePullPolicy: IfNotPresent
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod03
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest
+    imagePullPolicy: Always
+  - name: pod02
+    image: busybox:latest
+    imagePullPolicy: IfNotPresent
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: badpod04
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest
+    imagePullPolicy: Never
+  - name: pod02
+    image: busybox:1.35
+
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/pod-good.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/pod-good.yaml
new file mode 100644
index 000000000..e95c6524f
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/pod-good.yaml
@@ -0,0 +1,62 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod01
+spec:
+  containers:
+  - name: pod01
+    image: busybox:1.35
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod02
+spec:
+  containers:
+  - name: pod01
+    image: busybox # by default, imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod03
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest # by default, imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod04
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest
+    imagePullPolicy: Always
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod05
+spec:
+  containers:
+  - name: pod01
+    image: busybox:latest
+    imagePullPolicy: Always
+  - name: pod02
+    image: busybox:1.35
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: goodpod06
+spec:
+  containers:
+  - name: pod01
+    image: busybox:1.35
+    imagePullPolicy: IfNotPresent
+  - name: pod02
+    image: busybox:latest
+    imagePullPolicy: Always
+
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-bad.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-bad.yaml
new file mode 100644
index 000000000..aa3f6ecf1
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-bad.yaml
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: baddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: bb01
+        image: busybox:1.35
+      - name: bb02
+        image: busybox:latest
+        imagePullPolicy: Never
+      - name: bb03
+        image: busybox
+        imagePullPolicy: IfNotPresent
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: badcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: bb01
+            image: busybox:latest
+            imagePullPolicy: Never
+          - name: bb02
+            image: busybox:1.35
+          - name: bb03
+            image: busybox
+            imagePullPolicy: IfNotPresent
+          restartPolicy: OnFailure
+
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-good.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-good.yaml
new file mode 100644
index 000000000..2e93055ba
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/podcontroller-good.yaml
@@ -0,0 +1,44 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: busybox
+  name: gooddeployment01
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: busybox
+  strategy: {}
+  template:
+    metadata:
+      labels:
+        app: busybox
+    spec:
+      containers:
+      - name: bb01
+        image: busybox:1.35
+      - name: bb02
+        image: busybox:latest
+      - name: bb03
+        image: busybox
+---
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: goodcronjob01
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+          - name: bb01
+            image: busybox:1.35
+          - name: bb02
+            image: busybox:latest
+          - name: bb03
+            image: busybox
+          restartPolicy: OnFailure
+
diff --git a/other-cel/imagepullpolicy-always/.chainsaw-test/policy-ready.yaml b/other-cel/imagepullpolicy-always/.chainsaw-test/policy-ready.yaml
new file mode 100755
index 000000000..843ff7ef8
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.chainsaw-test/policy-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: imagepullpolicy-always
+status:
+  ready: true
+
diff --git a/other-cel/imagepullpolicy-always/.kyverno-test/kyverno-test.yaml b/other-cel/imagepullpolicy-always/.kyverno-test/kyverno-test.yaml
new file mode 100644
index 000000000..c5406a07a
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.kyverno-test/kyverno-test.yaml
@@ -0,0 +1,34 @@
+apiVersion: cli.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: imagepullpolicy-always
+policies:
+- ../imagepullpolicy-always.yaml
+resources:
+- resource.yaml
+results:
+- kind: Deployment
+  policy: imagepullpolicy-always
+  resources:
+  - mydeploy1
+  result: fail
+  rule: imagepullpolicy-always
+- kind: Pod
+  policy: imagepullpolicy-always
+  resources:
+  - myapp-pod-2
+  result: fail
+  rule: imagepullpolicy-always
+- kind: Deployment
+  policy: imagepullpolicy-always
+  resources:
+  - mydeploy2
+  result: pass
+  rule: imagepullpolicy-always
+- kind: Pod
+  policy: imagepullpolicy-always
+  resources:
+  - myapp-pod-1
+  result: pass
+  rule: imagepullpolicy-always
+
diff --git a/other-cel/imagepullpolicy-always/.kyverno-test/resource.yaml b/other-cel/imagepullpolicy-always/.kyverno-test/resource.yaml
new file mode 100644
index 000000000..4df92e40f
--- /dev/null
+++ b/other-cel/imagepullpolicy-always/.kyverno-test/resource.yaml
@@ -0,0 +1,68 @@
+apiVersion: v1
+kind: 
Pod +metadata: + name: myapp-pod-1 + labels: + app: myapp-1 +spec: + containers: + - name: nginx + image: nginx:latest + imagePullPolicy: "Always" + +--- +apiVersion: v1 +kind: Pod +metadata: + name: myapp-pod-2 + labels: + app: myapp-2 +spec: + containers: + - name: nginx + image: nginx:latest + imagePullPolicy: "IfNotPresent" +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mydeploy1 +spec: + replicas: 2 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: "IfNotPresent" + ports: + - containerPort: 80 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mydeploy2 +spec: + replicas: 2 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: "Always" + ports: + - containerPort: 80 + diff --git a/other-cel/imagepullpolicy-always/artifacthub-pkg.yml b/other-cel/imagepullpolicy-always/artifacthub-pkg.yml new file mode 100644 index 000000000..53537fb8b --- /dev/null +++ b/other-cel/imagepullpolicy-always/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: imagepullpolicy-always-cel +version: 1.0.0 +displayName: Require imagePullPolicy Always in CEL expressions +description: >- + If the `latest` tag is allowed for images, it is a good idea to have the imagePullPolicy field set to `Always` to ensure that, should that tag be overwritten, future pulls will get the updated image. This policy validates the imagePullPolicy is set to `Always` when the `latest` tag is specified explicitly or where a tag is not defined at all. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + If the `latest` tag is allowed for images, it is a good idea to have the imagePullPolicy field set to `Always` to ensure that, should that tag be overwritten, future pulls will get the updated image. This policy validates the imagePullPolicy is set to `Always` when the `latest` tag is specified explicitly or where a tag is not defined at all. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: a6708df7cd59fcd4dc4f764ff01541940f39eca5d4ddffd9529d83090e511b47 +createdAt: "2024-04-03T17:41:38Z" + diff --git a/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml b/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml new file mode 100644 index 000000000..951d07495 --- /dev/null +++ b/other-cel/imagepullpolicy-always/imagepullpolicy-always.yaml @@ -0,0 +1,36 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: imagepullpolicy-always + annotations: + policies.kyverno.io/title: Require imagePullPolicy Always in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: Pod + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + If the `latest` tag is allowed for images, it is a good idea to have the + imagePullPolicy field set to `Always` to ensure that, should that tag be overwritten, future + pulls will get the updated image.
This policy validates the imagePullPolicy is set to `Always` + when the `latest` tag is specified explicitly or where a tag is not defined at all. +spec: + validationFailureAction: Audit + background: true + rules: + - name: imagepullpolicy-always + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + expressions: + - expression: >- + object.spec.containers.all(container, + (container.image.endsWith(':latest') || !container.image.contains(':')) ? + container.imagePullPolicy == 'Always' : true) + message: >- + The imagePullPolicy must be set to `Always` when the tag `latest` is used. + diff --git a/other-cel/ingress-host-match-tls/.chainsaw-test/chainsaw-test.yaml b/other-cel/ingress-host-match-tls/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..ab409abe4 --- /dev/null +++ b/other-cel/ingress-host-match-tls/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: ingress-host-match-tls +spec: + steps: + - name: step-01 + try: + - apply: + file: ../ingress-host-match-tls.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: ingress-host-match-tls + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: ingress-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: ingress-bad.yaml + diff --git a/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-bad.yaml b/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-bad.yaml new file mode 100644 index 000000000..4ac1a456a --- /dev/null +++ b/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-bad.yaml @@ -0,0 +1,83 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: badingress01 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint99 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: badingress02 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + - host: endpoint02 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint03 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: badingress03 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + - host: endpoint02 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint01 + diff --git a/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-good.yaml b/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-good.yaml new file mode 100644 index 000000000..c1ef3d3dd --- /dev/null +++ b/other-cel/ingress-host-match-tls/.chainsaw-test/ingress-good.yaml @@ -0,0 +1,202 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress01 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + 
port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint01 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress02 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint01 + - endpoint02 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress03 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint02 + - endpoint01 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress04 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint01 + - endpoint02 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress05 +spec: + ingressClassName: nginx-int + rules: + - host: foo.bar.com + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint02 + - foo.bar.com +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress06 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint01 + http: + paths: + - path: /foo + pathType: Prefix + backend: + service: + name: bar + port: + number: 80 + - host: "*.foo.com" + http: + paths: + - path: /bar + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint01 + - "*.foo.com" +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress07 +spec: + defaultBackend: + resource: + apiGroup: k8s.example.com + kind: StorageBucket + name: foo-bar + rules: + - http: + paths: + - path: /foo + pathType: ImplementationSpecific + backend: + resource: + apiGroup: k8s.example.com + kind: StorageBucket + name: foo-bar + diff --git a/other-cel/ingress-host-match-tls/.chainsaw-test/policy-ready.yaml b/other-cel/ingress-host-match-tls/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..e9271befe --- /dev/null +++ b/other-cel/ingress-host-match-tls/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: ingress-host-match-tls +status: + ready: true + diff --git a/other-cel/ingress-host-match-tls/.kyverno-test/kyverno-test.yaml b/other-cel/ingress-host-match-tls/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..0ce9eb5ef --- /dev/null +++ b/other-cel/ingress-host-match-tls/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,26 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: ingress-host-match-tls +policies: +- ../ingress-host-match-tls.yaml +resources: +- resource.yaml +results: +- 
kind: Ingress + policy: ingress-host-match-tls + resources: + - badingress01 + - badingress02 + result: fail + rule: host-match-tls +- kind: Ingress + policy: ingress-host-match-tls + resources: + - goodingress01 + - goodingress02 + - goodingress03 + - goodingress04 + result: pass + rule: host-match-tls + diff --git a/other-cel/ingress-host-match-tls/.kyverno-test/resource.yaml b/other-cel/ingress-host-match-tls/.kyverno-test/resource.yaml new file mode 100644 index 000000000..b83e8bf7f --- /dev/null +++ b/other-cel/ingress-host-match-tls/.kyverno-test/resource.yaml @@ -0,0 +1,170 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: badingress01 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint99 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: badingress02 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + - host: endpoint02 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint03 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress01 +spec: + ingressClassName: someingress + rules: + - host: endpoint01 + http: + paths: + - backend: + service: + name: demo-svc + port: + number: 8080 + path: / + pathType: Prefix + tls: + - hosts: + - endpoint01 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress02 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint01 + - endpoint02 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress03 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint02 + - endpoint01 +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: goodingress04 +spec: + ingressClassName: nginx-int + rules: + - host: endpoint02 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + - host: endpoint01 + http: + paths: + - path: /testpath + pathType: Prefix + backend: + service: + name: test + port: + number: 80 + tls: + - hosts: + - endpoint01 + - endpoint02 + diff --git a/other-cel/ingress-host-match-tls/artifacthub-pkg.yml b/other-cel/ingress-host-match-tls/artifacthub-pkg.yml new file mode 100644 index 000000000..344cc88d1 --- /dev/null +++ b/other-cel/ingress-host-match-tls/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: ingress-host-match-tls-cel +version: 1.0.0 +displayName: Ingress Host Match TLS in CEL expressions +description: >- + Ingress resources which name a host name that is not present in the TLS section can produce ingress routing failures as a TLS certificate may not correspond to the destination host. 
This policy ensures that the host name in an Ingress rule is also found in the list of TLS hosts. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + Ingress resources which name a host name that is not present in the TLS section can produce ingress routing failures as a TLS certificate may not correspond to the destination host. This policy ensures that the host name in an Ingress rule is also found in the list of TLS hosts. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Ingress" +digest: 5442acaa90c6a45509015995028e241374b76d60cc700fbf6dd9f61178ba432f +createdAt: "2024-04-06T17:22:38Z" + diff --git a/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml b/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml new file mode 100644 index 000000000..27bb57185 --- /dev/null +++ b/other-cel/ingress-host-match-tls/ingress-host-match-tls.yaml @@ -0,0 +1,40 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: ingress-host-match-tls + annotations: + policies.kyverno.io/title: Ingress Host Match TLS in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + kyverno.io/kyverno-version: 1.11.0 + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Ingress + policies.kyverno.io/description: >- + Ingress resources which name a host name that is not present + in the TLS section can produce ingress routing failures as a TLS + certificate may not correspond to the destination host. This policy + ensures that the host name in an Ingress rule is also found + in the list of TLS hosts. +spec: + background: false + validationFailureAction: Audit + rules: + - name: host-match-tls + match: + any: + - resources: + kinds: + - Ingress + validate: + cel: + variables: + - name: tls + expression: "has(object.spec.tls) ? object.spec.tls : []" + expressions: + - expression: >- + object.spec.rules.all(rule, + !has(rule.host) || + variables.tls.exists(tls, has(tls.hosts) && tls.hosts.exists(tlsHost, tlsHost == rule.host))) + message: "The host(s) in spec.rules[].host must match those in spec.tls[].hosts[]." 
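One subtlety in the expression above: `tlsHost == rule.host` is a literal string comparison, so a wildcard TLS host only matches a rule host spelled identically (goodingress06 passes because `*.foo.com` appears verbatim in both lists). An Ingress naming a concrete subdomain under a wildcard TLS entry would fail; a hypothetical sketch, not one of the fixtures above:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: wildcard-mismatch   # hypothetical example, not a test fixture
spec:
  rules:
  - host: app.foo.com       # not string-equal to "*.foo.com", so the rule denies it
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: test
            port:
              number: 80
  tls:
  - hosts:
    - "*.foo.com"
```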
+ diff --git a/other-cel/limit-containers-per-pod/.chainsaw-test/chainsaw-test.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..9722cde91 --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,39 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: limit-containers-per-pod +spec: + steps: + - name: step-01 + try: + - apply: + file: ../limit-containers-per-pod.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: limit-containers-per-pod + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pod-good.yaml + - apply: + file: podcontroller-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pod-bad.yaml + - apply: + expect: + - check: + ($error != null): true + file: podcontroller-bad.yaml + diff --git a/other-cel/limit-containers-per-pod/.chainsaw-test/pod-bad.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/pod-bad.yaml new file mode 100644 index 000000000..96992623a --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/pod-bad.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: badpod01 +spec: + containers: + - name: pod01 + image: busybox:1.35 + - name: pod02 + image: busybox:1.35 + - name: pod03 + image: busybox:1.35 + - name: pod04 + image: busybox:1.35 + - name: pod05 + image: busybox:1.35 + diff --git a/other-cel/limit-containers-per-pod/.chainsaw-test/pod-good.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/pod-good.yaml new file mode 100644 index 000000000..c1f96536b --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/pod-good.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: goodpod01 +spec: + containers: + - name: pod01 + image: busybox:1.35 + - name: pod02 + image: busybox:1.35 + - name: pod03 + image: busybox:1.35 + - name: pod04 + image: busybox:1.35 +--- +apiVersion: v1 +kind: Pod +metadata: + name: goodpod02 +spec: + containers: + - name: pod01 + image: busybox:1.35 + - name: pod02 + image: busybox:1.35 + diff --git a/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-bad.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-bad.yaml new file mode 100644 index 000000000..718cf5d4d --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-bad.yaml @@ -0,0 +1,52 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: baddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb01 + image: busybox:1.35 + - name: bb02 + image: busybox:1.35 + - name: bb03 + image: busybox:1.35 + - name: bb04 + image: busybox:1.35 + - name: bb05 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: badcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb01 + image: busybox:1.35 + - name: bb02 + image: busybox:1.35 + - name: bb03 + image: busybox:1.35 + - name: bb04 + image: busybox:1.35 + - name: bb05 + image: busybox:1.35 + restartPolicy: OnFailure + diff --git 
a/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-good.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-good.yaml new file mode 100644 index 000000000..1a380e941 --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/podcontroller-good.yaml @@ -0,0 +1,48 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: busybox + name: gooddeployment01 +spec: + replicas: 1 + selector: + matchLabels: + app: busybox + strategy: {} + template: + metadata: + labels: + app: busybox + spec: + containers: + - name: bb01 + image: busybox:1.35 + - name: bb02 + image: busybox:1.35 + - name: bb03 + image: busybox:1.35 + - name: bb04 + image: busybox:1.35 +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: goodcronjob01 +spec: + schedule: "* * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: bb01 + image: busybox:1.35 + - name: bb02 + image: busybox:1.35 + - name: bb03 + image: busybox:1.35 + - name: bb04 + image: busybox:1.35 + restartPolicy: OnFailure + diff --git a/other-cel/limit-containers-per-pod/.chainsaw-test/policy-ready.yaml b/other-cel/limit-containers-per-pod/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..5623d2644 --- /dev/null +++ b/other-cel/limit-containers-per-pod/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-containers-per-pod +status: + ready: true + diff --git a/other-cel/limit-containers-per-pod/.kyverno-test/kyverno-test.yaml b/other-cel/limit-containers-per-pod/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..807b38981 --- /dev/null +++ b/other-cel/limit-containers-per-pod/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,34 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: limit-containers-per-pod +policies: +- ../limit-containers-per-pod.yaml +resources: +- resource.yaml +results: +- kind: CronJob + policy: limit-containers-per-pod + resources: + - mycronjob + result: fail + rule: autogen-cronjob-limit-containers-per-pod +- kind: Deployment + policy: limit-containers-per-pod + resources: + - mydeploy + result: pass + rule: autogen-limit-containers-per-pod +- kind: Pod + policy: limit-containers-per-pod + resources: + - myapp-pod-2 + result: fail + rule: limit-containers-per-pod +- kind: Pod + policy: limit-containers-per-pod + resources: + - myapp-pod-1 + result: pass + rule: limit-containers-per-pod + diff --git a/other-cel/limit-containers-per-pod/.kyverno-test/resource.yaml b/other-cel/limit-containers-per-pod/.kyverno-test/resource.yaml new file mode 100644 index 000000000..e39bdd108 --- /dev/null +++ b/other-cel/limit-containers-per-pod/.kyverno-test/resource.yaml @@ -0,0 +1,76 @@ +apiVersion: v1 +kind: Pod +metadata: + name: myapp-pod-1 + labels: + app: myapp +spec: + containers: + - name: nginx + image: nginx:latest + +--- +apiVersion: v1 +kind: Pod +metadata: + name: myapp-pod-2 + labels: + app: myapp +spec: + containers: + - name: nginx1 + image: nginx:latest + - name: nginx2 + image: nginx:latest + - name: nginx3 + image: nginx:latest + - name: nginx4 + image: nginx:latest + - name: nginx5 + image: nginx:latest + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mydeploy +spec: + replicas: 2 + selector: + matchLabels: + app: myapp + template: + metadata: + labels: + app: myapp + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: mycronjob 
+spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: hello1 + image: busybox + - name: hello2 + image: busybox + - name: hello3 + image: busybox + - name: hello4 + image: busybox + - name: hello5 + image: busybox + restartPolicy: OnFailure + diff --git a/other-cel/limit-containers-per-pod/artifacthub-pkg.yml b/other-cel/limit-containers-per-pod/artifacthub-pkg.yml new file mode 100644 index 000000000..92aa34409 --- /dev/null +++ b/other-cel/limit-containers-per-pod/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: limit-containers-per-pod-cel +version: 1.0.0 +displayName: Limit Containers per Pod in CEL expressions +description: >- + Pods can have many different containers which are tightly coupled. It may be desirable to limit the number of containers that can be in a single Pod to help enforce best practices or so policy can be applied consistently. This policy checks all Pods to ensure they have no more than four containers. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml + ``` +keywords: + - kyverno + - Sample + - CEL Expressions +readme: | + Pods can have many different containers which are tightly coupled. It may be desirable to limit the number of containers that can be in a single Pod to help enforce best practices or so policy can be applied consistently. This policy checks all Pods to ensure they have no more than four containers. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Sample in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "Pod" +digest: 7916672ede794217fb00144785594818cbb66f409c1d2f0d513cfeb944e92ed1 +createdAt: "2024-04-01T15:48:55Z" + diff --git a/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml b/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml new file mode 100644 index 000000000..022377acc --- /dev/null +++ b/other-cel/limit-containers-per-pod/limit-containers-per-pod.yaml @@ -0,0 +1,32 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-containers-per-pod + annotations: + policies.kyverno.io/title: Limit Containers per Pod in CEL expressions + policies.kyverno.io/category: Sample in CEL + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + Pods can have many different containers which + are tightly coupled. It may be desirable to limit the number of containers that + can be in a single Pod to help enforce best practices or so policy can + be applied consistently. This policy checks all Pods to ensure they have + no more than four containers. +spec: + validationFailureAction: Audit + background: false + rules: + - name: limit-containers-per-pod + match: + any: + - resources: + kinds: + - Pod + validate: + cel: + expressions: + - expression: "size(object.spec.containers) <= 4" + message: "Pods can only have a maximum of 4 containers."
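Worth noting: `size(object.spec.containers)` counts only regular containers, so initContainers and ephemeral containers do not count against the limit of four. If a stricter cap were desired, a variation along these lines could sum both lists — a sketch of an alternative, not what this policy does:

```yaml
validate:
  cel:
    expressions:
    # Hypothetical variation: include initContainers in the count.
    - expression: >-
        size(object.spec.containers) +
        (has(object.spec.initContainers) ? size(object.spec.initContainers) : 0) <= 4
      message: "Pods can only have a maximum of 4 containers, including init containers."
```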
+ diff --git a/other-cel/limit-hostpath-type-pv/.chainsaw-test/chainsaw-test.yaml b/other-cel/limit-hostpath-type-pv/.chainsaw-test/chainsaw-test.yaml new file mode 100755 index 000000000..1d18ea171 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.chainsaw-test/chainsaw-test.yaml @@ -0,0 +1,32 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: limit-hostpath-type-pv +spec: + steps: + - name: step-01 + try: + - apply: + file: ../limit-hostpath-type-pv.yaml + - patch: + resource: + apiVersion: kyverno.io/v1 + kind: ClusterPolicy + metadata: + name: limit-hostpath-type-pv + spec: + validationFailureAction: Enforce + - assert: + file: policy-ready.yaml + - name: step-02 + try: + - apply: + file: pv-good.yaml + - apply: + expect: + - check: + ($error != null): true + file: pv-bad.yaml + diff --git a/other-cel/limit-hostpath-type-pv/.chainsaw-test/policy-ready.yaml b/other-cel/limit-hostpath-type-pv/.chainsaw-test/policy-ready.yaml new file mode 100755 index 000000000..4282df611 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.chainsaw-test/policy-ready.yaml @@ -0,0 +1,7 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-type-pv +status: + ready: true + diff --git a/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-bad.yaml b/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-bad.yaml new file mode 100644 index 000000000..022e4ed2c --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-bad.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: bad-pv01 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/etc" +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: bad-pv02 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/etc/data/home" + diff --git a/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-good.yaml b/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-good.yaml new file mode 100644 index 000000000..2dd25ac99 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.chainsaw-test/pv-good.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: good-pv01 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/data" +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: good-pv02 + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/data/home" + diff --git a/other-cel/limit-hostpath-type-pv/.kyverno-test/kyverno-test.yaml b/other-cel/limit-hostpath-type-pv/.kyverno-test/kyverno-test.yaml new file mode 100644 index 000000000..c736b0ec1 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.kyverno-test/kyverno-test.yaml @@ -0,0 +1,22 @@ +apiVersion: cli.kyverno.io/v1alpha1 +kind: Test +metadata: + name: limit-hostpath-type-pv +policies: +- ../limit-hostpath-type-pv.yaml +resources: +- resource.yaml +results: +- kind: PersistentVolume + policy: limit-hostpath-type-pv + resources: + - bad-pv + 
result: fail + rule: limit-hostpath-type-pv-to-slash-data +- kind: PersistentVolume + policy: limit-hostpath-type-pv + resources: + - good-pv + result: pass + rule: limit-hostpath-type-pv-to-slash-data + diff --git a/other-cel/limit-hostpath-type-pv/.kyverno-test/resource.yaml b/other-cel/limit-hostpath-type-pv/.kyverno-test/resource.yaml new file mode 100644 index 000000000..dd9a7b597 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/.kyverno-test/resource.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: good-pv + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/data" +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: bad-pv + labels: + type: local +spec: + storageClassName: manual + capacity: + storage: 5Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain + hostPath: + path: "/etc" + diff --git a/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml b/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml new file mode 100644 index 000000000..5d0ee14d9 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/artifacthub-pkg.yml @@ -0,0 +1,24 @@ +name: limit-hostpath-type-pv-cel +version: 1.0.0 +displayName: Limit hostPath PersistentVolumes to Specific Directories in CEL expressions +description: >- + hostPath PersistentVolumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. +install: |- + ```shell + kubectl apply -f https://raw.githubusercontent.com/kyverno/policies/main/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml + ``` +keywords: + - kyverno + - Other + - CEL Expressions +readme: | + hostPath PersistentVolumes consume the underlying node's file system. If hostPath volumes are not to be universally disabled, they should be restricted to only certain host paths so as not to allow access to sensitive information. This policy ensures the only directory that can be mounted as a hostPath volume is /data. + + Refer to the documentation for more details on Kyverno annotations: https://artifacthub.io/docs/topics/annotations/kyverno/ +annotations: + kyverno/category: "Other in CEL" + kyverno/kubernetesVersion: "1.26-1.27" + kyverno/subject: "PersistentVolume" +digest: 981a66b5f77de02d3f6623b49c02421dd1adf4e9882d96a2e0219de9dba52672 +createdAt: "2024-04-04T17:35:35Z" + diff --git a/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml b/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml new file mode 100644 index 000000000..6004e0e42 --- /dev/null +++ b/other-cel/limit-hostpath-type-pv/limit-hostpath-type-pv.yaml @@ -0,0 +1,32 @@ +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: limit-hostpath-type-pv + annotations: + policies.kyverno.io/title: Limit hostPath PersistentVolumes to Specific Directories in CEL expressions + policies.kyverno.io/category: Other in CEL + policies.kyverno.io/severity: medium + policies.kyverno.io/subject: PersistentVolume + policies.kyverno.io/minversion: 1.11.0 + kyverno.io/kubernetes-version: "1.26-1.27" + policies.kyverno.io/description: >- + hostPath PersistentVolumes consume the underlying node's file system.
If hostPath volumes + are not to be universally disabled, they should be restricted to only certain + host paths so as not to allow access to sensitive information. This policy ensures + the only directory that can be mounted as a hostPath volume is /data. +spec: + background: false + validationFailureAction: Audit + rules: + - name: limit-hostpath-type-pv-to-slash-data + match: + any: + - resources: + kinds: + - PersistentVolume + validate: + cel: + expressions: + - expression: "!has(object.spec.hostPath) || object.spec.hostPath.path.startsWith('/data')" + message: hostPath type persistent volumes are confined to /data. +
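A final caveat on the prefix check: `startsWith('/data')` also accepts sibling directories such as `/database` or `/data2`, not just `/data` and its subpaths. If that matters, a tighter expression could anchor on the directory boundary — a hypothetical tightening, not part of this PR:

```yaml
validate:
  cel:
    expressions:
    # Hypothetical variation: allow /data itself or paths under /data/ only.
    - expression: >-
        !has(object.spec.hostPath) ||
        object.spec.hostPath.path == '/data' ||
        object.spec.hostPath.path.startsWith('/data/')
      message: hostPath type persistent volumes are confined to /data.
```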