diff --git a/.github/workflows/go-test.yaml b/.github/workflows/go-test.yaml index 544c57902..37f4f9816 100644 --- a/.github/workflows/go-test.yaml +++ b/.github/workflows/go-test.yaml @@ -2,11 +2,11 @@ name: Go Test on: push: paths: - - 'test/**' + - 'test/datadog-operator/**' - 'charts/datadog-operator/**' pull_request: paths: - - 'test/**' + - 'test/datadog-operator/**' - 'charts/datadog-operator/**' env: GO111MODULE: "on" diff --git a/charts/datadog/CHANGELOG.md b/charts/datadog/CHANGELOG.md index d0c62e05f..4932323ff 100644 --- a/charts/datadog/CHANGELOG.md +++ b/charts/datadog/CHANGELOG.md @@ -1,5 +1,9 @@ # Datadog changelog +## 3.59.1 + +* Add support for configuring Agent sidecar injection using Admission Controller. + ## 3.59.0 * Set default `Agent` and `Cluster-Agent` version to `7.52.0`. diff --git a/charts/datadog/Chart.yaml b/charts/datadog/Chart.yaml index 7a24a76b4..b5c5789de 100644 --- a/charts/datadog/Chart.yaml +++ b/charts/datadog/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 name: datadog -version: 3.59.0 +version: 3.59.1 appVersion: "7" description: Datadog Agent keywords: diff --git a/charts/datadog/README.md b/charts/datadog/README.md index 30e41dc28..68efd72a5 100644 --- a/charts/datadog/README.md +++ b/charts/datadog/README.md @@ -1,6 +1,6 @@ # Datadog -![Version: 3.59.0](https://img.shields.io/badge/Version-3.59.0-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) +![Version: 3.59.1](https://img.shields.io/badge/Version-3.59.1-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). @@ -544,6 +544,14 @@ helm install \ | agents.volumeMounts | list | `[]` | Specify additional volumes to mount in all containers of the agent pod | | agents.volumes | list | `[]` | Specify additional volumes to mount in the dd-agent container | | clusterAgent.additionalLabels | object | `{}` | Adds labels to the Cluster Agent deployment and pods | +| clusterAgent.admissionController.agentSidecarInjection.clusterAgentCommunicationEnabled | bool | `true` | Enable communication between Agent sidecars and the Cluster Agent. | +| clusterAgent.admissionController.agentSidecarInjection.containerRegistry | string | `nil` | | +| clusterAgent.admissionController.agentSidecarInjection.enabled | bool | `false` | Enables Datadog Agent sidecar injection. | +| clusterAgent.admissionController.agentSidecarInjection.imageName | string | `nil` | | +| clusterAgent.admissionController.agentSidecarInjection.imageTag | string | `nil` | | +| clusterAgent.admissionController.agentSidecarInjection.profiles | list | `[]` | Defines the sidecar configuration override, currently only one profile is supported. | +| clusterAgent.admissionController.agentSidecarInjection.provider | string | `nil` | Used by the admission controller to add infrastructure provider-specific configurations to the Agent sidecar. | +| clusterAgent.admissionController.agentSidecarInjection.selectors | list | `[]` | Defines the pod selector for sidecar injection, currently only one rule is supported. | | clusterAgent.admissionController.configMode | string | `nil` | The kind of configuration to be injected, it can be "hostip", "service", or "socket". | | clusterAgent.admissionController.enabled | bool | `true` | Enable the admissionController to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods | | clusterAgent.admissionController.failurePolicy | string | `"Ignore"` | Set the failure policy for dynamic admission control.' | diff --git a/charts/datadog/templates/_ac-agent-sidecar-env.yaml b/charts/datadog/templates/_ac-agent-sidecar-env.yaml new file mode 100644 index 000000000..a2791003d --- /dev/null +++ b/charts/datadog/templates/_ac-agent-sidecar-env.yaml @@ -0,0 +1,50 @@ +{{- define "ac-agent-sidecar-env" -}} +{{- if and .Values.clusterAgent.admissionController.enabled .Values.clusterAgent.admissionController.agentSidecarInjection.enabled }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_ENABLED + value: "true" +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.clusterAgentCommunicationEnabled }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CLUSTER_AGENT_ENABLED + value: "true" +{{- else }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CLUSTER_AGENT_ENABLED + value: "false" +{{- end }} +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.provider }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROVIDER + value: {{ .Values.clusterAgent.admissionController.agentSidecarInjection.provider }} +{{- end }} + +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.containerRegistry }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CONTAINER_REGISTRY + value: {{ .Values.clusterAgent.admissionController.agentSidecarInjection.containerRegistry }} +{{- else if .Values.registry }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CONTAINER_REGISTRY + value: {{ .Values.registry }} +{{- end }} + +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.imageName }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_NAME + value: {{ .Values.clusterAgent.admissionController.agentSidecarInjection.imageName }} +{{- else if .Values.agents.image.name}} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_NAME + value: {{ .Values.agents.image.name }} +{{- end }} + +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.imageTag }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_TAG + value: {{ .Values.clusterAgent.admissionController.agentSidecarInjection.imageTag }} +{{- else if .Values.agents.image.tag}} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_TAG + value: {{ .Values.agents.image.tag }} +{{- end }} + +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.selectors }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_SELECTORS + value: '{{ toJson .Values.clusterAgent.admissionController.agentSidecarInjection.selectors }}' +{{- end }} +{{- if .Values.clusterAgent.admissionController.agentSidecarInjection.profiles }} +- name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROFILES + value: '{{ toJson .Values.clusterAgent.admissionController.agentSidecarInjection.profiles }}' +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/datadog/templates/cluster-agent-deployment.yaml b/charts/datadog/templates/cluster-agent-deployment.yaml index c86d8bf45..cd7bd026f 100644 --- a/charts/datadog/templates/cluster-agent-deployment.yaml +++ b/charts/datadog/templates/cluster-agent-deployment.yaml @@ -235,6 +235,7 @@ spec: - name: DD_ADMISSION_CONTROLLER_AUTO_INSTRUMENTATION_PATCHER_ENABLED value: "true" {{- end }} + {{ include "ac-agent-sidecar-env" . | nindent 10 }} - name: DD_REMOTE_CONFIGURATION_ENABLED value: {{ include "clusterAgent-remoteConfiguration-enabled" . | quote }} {{- if .Values.datadog.apm.instrumentation.enabled }} diff --git a/charts/datadog/values.yaml b/charts/datadog/values.yaml index 748be61c2..f39b58904 100644 --- a/charts/datadog/values.yaml +++ b/charts/datadog/values.yaml @@ -1061,6 +1061,56 @@ clusterAgent: # clusterAgent.admissionController.port -- Set port of cluster-agent admission controller service port: 8000 + agentSidecarInjection: + # clusterAgent.admissionController.agentSidecarInjection.enabled -- Enables Datadog Agent sidecar injection. + + ## When enabled, the admission controller mutating webhook will inject an Agent sidecar with minimal configuration in every pod meeting the configured criteria. + enabled: false + + # clusterAgent.admissionController.agentSidecarInjection.provider -- Used by the admission controller to add infrastructure provider-specific configurations to the Agent sidecar. + + ## Currently only "fargate" is supported. To use the feature in other environments (including local testing) omit the config. + ## ref: https://docs.datadoghq.com/integrations/eks_fargate + provider: + + # clusterAgent.admissionController.agentSidecarInjection.clusterAgentCommunicationEnabled -- Enable communication between Agent sidecars and the Cluster Agent. + clusterAgentCommunicationEnabled: true + + # clusterAgent.admissionController.containerRegistry -- Override the default registry for the sidecar Agent. + containerRegistry: + + # clusterAgent.admissionController.imageName -- Override the default agents.image.name for the Agent sidecar. + imageName: + + # clusterAgent.admissionController.imageTag -- Override the default agents.image.tag for the Agent sidecar. + imageTag: + + # clusterAgent.admissionController.agentSidecarInjection.selectors -- Defines the pod selector for sidecar injection, currently only one rule is supported. + selectors: [] + # - objectSelector: + # matchLabels: + # "podlabelKey1": podlabelValue1 + # "podlabelKey2": podlabelValue2 + # namespaceSelector: + # matchLabels: + # "nsLabelKey1": nsLabelValue1 + # "nsLabelKey2": nsLabelValue2 + + # clusterAgent.admissionController.agentSidecarInjection.profiles -- Defines the sidecar configuration override, currently only one profile is supported. + + ## This setting allows overriding the sidecar Agent configuration by adding environment variables and providing resource settings. + profiles: [] + # - env: + # - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + # value: "true" + # resources: + # requests: + # cpu: "1" + # memory: "512Mi" + # limits: + # cpu: "2" + # memory: "1024Mi" + # clusterAgent.confd -- Provide additional cluster check configurations. Each key will become a file in /conf.d. ## ref: https://docs.datadoghq.com/agent/autodiscovery/ diff --git a/test/common/common.go b/test/common/common.go index 07d25354d..c060ae173 100644 --- a/test/common/common.go +++ b/test/common/common.go @@ -37,7 +37,7 @@ func RenderChart(t *testing.T, cmd HelmCommand) (string, error) { ValuesFiles: cmd.Values, } - output, err := helm.RenderTemplateE(t, options, chartPath, cmd.ReleaseName, cmd.ShowOnly) + output, err := helm.RenderTemplateE(t, options, chartPath, cmd.ReleaseName, cmd.ShowOnly, "--debug") return output, err } @@ -80,6 +80,12 @@ func CreateSecretFromEnv(t *testing.T, kubectlOptions *k8s.KubectlOptions, apiKe } } +func ReadFile(t *testing.T, filepath string) string { + fileContent, err := os.ReadFile(filepath) + require.NoError(t, err, "can't load manifest from file", "path", filepath) + return string(fileContent) +} + func LoadFromFile[T any](t *testing.T, filepath string, destObj *T) string { fileContent, err := os.ReadFile(filepath) require.NoError(t, err, "can't load manifest from file", "path", filepath) diff --git a/test/datadog/baseline/agent-clusterchecks-deployment_default.yaml b/test/datadog/baseline/agent-clusterchecks-deployment_default.yaml new file mode 100644 index 000000000..7aa229602 --- /dev/null +++ b/test/datadog/baseline/agent-clusterchecks-deployment_default.yaml @@ -0,0 +1,181 @@ +--- +# Source: datadog/templates/agent-clusterchecks-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-clusterchecks + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: clusterchecks-agent + +spec: + replicas: 2 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-clusterchecks + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: clusterchecks-agent + admission.datadoghq.com/enabled: "false" + app: datadog-clusterchecks + + name: datadog-clusterchecks + annotations: + checksum/clusteragent_token: 999b326e98e9596150bcbfd45becfdc4695634b0d8198c59d43ce7043ac9a611 + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-checks + automountServiceAccountToken: true + imagePullSecrets: + [] + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + readOnly: false # Need RW for writing agent config files + resources: + {} + - name: init-config + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for writing datadog.yaml config file + resources: + {} + containers: + - name: agent + image: "gcr.io/datadoghq/agent:7.51.0" + command: ["bash", "-c"] + args: + - rm -rf /etc/datadog-agent/conf.d && touch /etc/datadog-agent/datadog.yaml && exec agent run + imagePullPolicy: IfNotPresent + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: KUBERNETES + value: "yes" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks" + - name: DD_HEALTH_PORT + value: "5557" + # Cluster checks (cluster-agent communication) + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + # Safely run alongside the daemonset + - name: DD_ENABLE_METADATA_COLLECTION + value: "false" + # Expose CLC stats + - name: DD_CLC_RUNNER_ENABLED + value: "true" + - name: DD_CLC_RUNNER_HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DD_CLC_RUNNER_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + # Remove unused features + - name: DD_USE_DOGSTATSD + value: "false" + - name: DD_PROCESS_AGENT_ENABLED + value: "false" + - name: DD_LOGS_ENABLED + value: "false" + - name: DD_APM_ENABLED + value: "false" + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + + + resources: + {} + volumeMounts: + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for config path + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5557 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5557 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + volumes: + - name: installinfo + configMap: + name: datadog-installinfo + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the runners on different nodes if possible + # for better checks stability in case of node failure. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-clusterchecks + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/datadog/baseline/cluster-agent-deployment_default.yaml b/test/datadog/baseline/cluster-agent-deployment_default.yaml new file mode 100644 index 000000000..7668108cb --- /dev/null +++ b/test/datadog/baseline/cluster-agent-deployment_default.yaml @@ -0,0 +1,239 @@ +--- +# Source: datadog/templates/cluster-agent-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: cluster-agent + +spec: + replicas: 1 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-cluster-agent + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: cluster-agent + admission.datadoghq.com/enabled: "false" + app: datadog-cluster-agent + + name: datadog-cluster-agent + annotations: + checksum/clusteragent_token: cb4b859f7f7c7e495dcca4e6471a201cf8c7eb77134fbcaaf27e5a5b5ef1b9b8 + checksum/clusteragent-configmap: c4ebd3c35d77ac0260f47e1ec10c9733cd76488f4232f76f26466174b922b430 + checksum/api_key: 3c042e07978640da60c9adc10c03acb2e68c176d8f5ecc4c1c8d216051f476a5 + checksum/application_key: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-agent + automountServiceAccountToken: true + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - cp + - -r + args: + - /etc/datadog-agent + - /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + containers: + - name: cluster-agent + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + resources: + {} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + - containerPort: 5000 + name: agentmetrics + protocol: TCP + - containerPort: 8000 + name: datadog-webhook + protocol: TCP + env: + - name: DD_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DD_HEALTH_PORT + value: "5556" + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog" + key: api-key + optional: true + + - name: KUBERNETES + value: "yes" + - name: DD_ADMISSION_CONTROLLER_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME + value: "datadog-webhook" + - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED + value: "false" + - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME + value: datadog-cluster-agent-admission-controller + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE + value: socket + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME + value: datadog + - name: DD_ADMISSION_CONTROLLER_FAILURE_POLICY + value: "Ignore" + - name: DD_ADMISSION_CONTROLLER_PORT + value: "8000" + + + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_CLUSTER_CHECKS_ENABLED + value: "true" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_LEADER_ELECTION + value: "true" + - name: DD_LEADER_ELECTION_DEFAULT_RESOURCE + value: "configmap" + - name: DD_LEADER_LEASE_NAME + value: datadog-leader-election + - name: DD_CLUSTER_AGENT_TOKEN_NAME + value: datadogtoken + - name: DD_COLLECT_KUBERNETES_EVENTS + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_CLUSTER_AGENT_COLLECT_KUBERNETES_TAGS + value: "false" + - name: DD_KUBE_RESOURCES_NAMESPACE + value: datadog-agent + - name: CHART_RELEASE_NAME + value: "datadog" + - name: AGENT_DAEMONSET + value: datadog + - name: CLUSTER_AGENT_DEPLOYMENT + value: datadog-cluster-agent + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED + value: "true" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - name: datadogrun + mountPath: /opt/datadog-agent/run + readOnly: false + - name: varlog + mountPath: /var/log/datadog + readOnly: false + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: confd + mountPath: /conf.d + readOnly: true + - name: config + mountPath: /etc/datadog-agent + volumes: + - name: datadogrun + emptyDir: {} + - name: varlog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: confd + configMap: + name: datadog-cluster-agent-confd + items: + - key: kubernetes_state_core.yaml.default + path: kubernetes_state_core.yaml.default + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the cluster agents on different nodes + # to guarantee that the standby instance can immediately take the lead from a leader running of a faulty node. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-cluster-agent + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/datadog/baseline/cluster-agent-deployment_default_advanced_AC_injection.yaml b/test/datadog/baseline/cluster-agent-deployment_default_advanced_AC_injection.yaml new file mode 100644 index 000000000..80702f39b --- /dev/null +++ b/test/datadog/baseline/cluster-agent-deployment_default_advanced_AC_injection.yaml @@ -0,0 +1,253 @@ +--- +# Source: datadog/templates/cluster-agent-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: cluster-agent + +spec: + replicas: 1 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-cluster-agent + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: cluster-agent + admission.datadoghq.com/enabled: "false" + app: datadog-cluster-agent + + name: datadog-cluster-agent + annotations: + checksum/clusteragent_token: cc4689bbc5f524d4080b164c621500d7f830d0aeef415c86a3535e76883577a3 + checksum/clusteragent-configmap: c4ebd3c35d77ac0260f47e1ec10c9733cd76488f4232f76f26466174b922b430 + checksum/api_key: 3c042e07978640da60c9adc10c03acb2e68c176d8f5ecc4c1c8d216051f476a5 + checksum/application_key: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-agent + automountServiceAccountToken: true + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - cp + - -r + args: + - /etc/datadog-agent + - /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + containers: + - name: cluster-agent + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + resources: + {} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + - containerPort: 5000 + name: agentmetrics + protocol: TCP + - containerPort: 8000 + name: datadog-webhook + protocol: TCP + env: + - name: DD_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DD_HEALTH_PORT + value: "5556" + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog" + key: api-key + optional: true + + - name: KUBERNETES + value: "yes" + - name: DD_ADMISSION_CONTROLLER_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME + value: "datadog-webhook" + - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED + value: "false" + - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME + value: datadog-cluster-agent-admission-controller + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE + value: socket + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME + value: datadog + - name: DD_ADMISSION_CONTROLLER_FAILURE_POLICY + value: "Ignore" + - name: DD_ADMISSION_CONTROLLER_PORT + value: "8000" + + + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CLUSTER_AGENT_ENABLED + value: "false" + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CONTAINER_REGISTRY + value: gcr.io/datadoghq + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_NAME + value: agent + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_TAG + value: 7.52.0 + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_SELECTORS + value: '[{"namespaceSelector":{"matchLabels":{"agentSidecars":"true"}},"objectSelector":{"matchLabels":{"app":"nginx","runsOn":"nodeless"}}}]' + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROFILES + value: '[{"env":[{"name":"DD_ORCHESTRATOR_EXPLORER_ENABLED","value":"false"},{"name":"DD_TAGS","value":"key1:value1 key2:value2"}],"resources":{"limits":{"cpu":"2","memory":"1024Mi"},"requests":{"cpu":"1","memory":"512Mi"}}}]' + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_CLUSTER_CHECKS_ENABLED + value: "true" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_LEADER_ELECTION + value: "true" + - name: DD_LEADER_ELECTION_DEFAULT_RESOURCE + value: "configmap" + - name: DD_LEADER_LEASE_NAME + value: datadog-leader-election + - name: DD_CLUSTER_AGENT_TOKEN_NAME + value: datadogtoken + - name: DD_COLLECT_KUBERNETES_EVENTS + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_CLUSTER_AGENT_COLLECT_KUBERNETES_TAGS + value: "false" + - name: DD_KUBE_RESOURCES_NAMESPACE + value: datadog-agent + - name: CHART_RELEASE_NAME + value: "datadog" + - name: AGENT_DAEMONSET + value: datadog + - name: CLUSTER_AGENT_DEPLOYMENT + value: datadog-cluster-agent + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED + value: "true" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - name: datadogrun + mountPath: /opt/datadog-agent/run + readOnly: false + - name: varlog + mountPath: /var/log/datadog + readOnly: false + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: confd + mountPath: /conf.d + readOnly: true + - name: config + mountPath: /etc/datadog-agent + volumes: + - name: datadogrun + emptyDir: {} + - name: varlog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: confd + configMap: + name: datadog-cluster-agent-confd + items: + - key: kubernetes_state_core.yaml.default + path: kubernetes_state_core.yaml.default + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the cluster agents on different nodes + # to guarantee that the standby instance can immediately take the lead from a leader running of a faulty node. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-cluster-agent + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/datadog/baseline/cluster-agent-deployment_default_minimal_AC_injection.yaml b/test/datadog/baseline/cluster-agent-deployment_default_minimal_AC_injection.yaml new file mode 100644 index 000000000..c6d81039a --- /dev/null +++ b/test/datadog/baseline/cluster-agent-deployment_default_minimal_AC_injection.yaml @@ -0,0 +1,249 @@ +--- +# Source: datadog/templates/cluster-agent-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: cluster-agent + +spec: + replicas: 1 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-cluster-agent + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: cluster-agent + admission.datadoghq.com/enabled: "false" + app: datadog-cluster-agent + + name: datadog-cluster-agent + annotations: + checksum/clusteragent_token: 20307ad626ccec1c8abc4b8c42089120fea2bcbc8d51ad48f052fc9d7f1ac62e + checksum/clusteragent-configmap: c4ebd3c35d77ac0260f47e1ec10c9733cd76488f4232f76f26466174b922b430 + checksum/api_key: 3c042e07978640da60c9adc10c03acb2e68c176d8f5ecc4c1c8d216051f476a5 + checksum/application_key: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-agent + automountServiceAccountToken: true + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - cp + - -r + args: + - /etc/datadog-agent + - /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + containers: + - name: cluster-agent + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + resources: + {} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + - containerPort: 5000 + name: agentmetrics + protocol: TCP + - containerPort: 8000 + name: datadog-webhook + protocol: TCP + env: + - name: DD_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DD_HEALTH_PORT + value: "5556" + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog" + key: api-key + optional: true + + - name: KUBERNETES + value: "yes" + - name: DD_ADMISSION_CONTROLLER_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME + value: "datadog-webhook" + - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED + value: "false" + - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME + value: datadog-cluster-agent-admission-controller + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE + value: socket + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME + value: datadog + - name: DD_ADMISSION_CONTROLLER_FAILURE_POLICY + value: "Ignore" + - name: DD_ADMISSION_CONTROLLER_PORT + value: "8000" + + + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROVIDER + value: fargate + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_NAME + value: agent + - name: DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_TAG + value: 7.51.0 + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_CLUSTER_CHECKS_ENABLED + value: "true" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_LEADER_ELECTION + value: "true" + - name: DD_LEADER_ELECTION_DEFAULT_RESOURCE + value: "configmap" + - name: DD_LEADER_LEASE_NAME + value: datadog-leader-election + - name: DD_CLUSTER_AGENT_TOKEN_NAME + value: datadogtoken + - name: DD_COLLECT_KUBERNETES_EVENTS + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_CLUSTER_AGENT_COLLECT_KUBERNETES_TAGS + value: "false" + - name: DD_KUBE_RESOURCES_NAMESPACE + value: datadog-agent + - name: CHART_RELEASE_NAME + value: "datadog" + - name: AGENT_DAEMONSET + value: datadog + - name: CLUSTER_AGENT_DEPLOYMENT + value: datadog-cluster-agent + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED + value: "true" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - name: datadogrun + mountPath: /opt/datadog-agent/run + readOnly: false + - name: varlog + mountPath: /var/log/datadog + readOnly: false + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: confd + mountPath: /conf.d + readOnly: true + - name: config + mountPath: /etc/datadog-agent + volumes: + - name: datadogrun + emptyDir: {} + - name: varlog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: confd + configMap: + name: datadog-cluster-agent-confd + items: + - key: kubernetes_state_core.yaml.default + path: kubernetes_state_core.yaml.default + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the cluster agents on different nodes + # to guarantee that the standby instance can immediately take the lead from a leader running of a faulty node. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-cluster-agent + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/datadog/baseline/daemonset_default.yaml b/test/datadog/baseline/daemonset_default.yaml new file mode 100644 index 000000000..7a7c56497 --- /dev/null +++ b/test/datadog/baseline/daemonset_default.yaml @@ -0,0 +1,475 @@ +--- +# Source: datadog/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: datadog + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: agent + +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + app: datadog + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: agent + admission.datadoghq.com/enabled: "false" + app: datadog + + name: datadog + annotations: + checksum/clusteragent_token: de10bbc694d3c44c7863afc67c2921c921466f00bae249eddfd43d06f0e40e83 + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + checksum/autoconf-config: 74234e98afe7498fb5daf1f36ac2d78acc339464f950703b8c019892f982b90b + checksum/confd-config: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/checksd-config: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + spec: + + securityContext: + runAsUser: 0 + hostPID: true + containers: + - name: agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["agent", "run"] + + resources: + {} + ports: + - containerPort: 8125 + name: dogstatsdport + protocol: UDP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_DOGSTATSD_PORT + value: "8125" + - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_DOGSTATSD_TAG_CARDINALITY + value: "low" + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_APM_ENABLED + value: "true" + - name: DD_LOGS_ENABLED + value: "false" + - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL + value: "false" + - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE + value: "true" + - name: DD_LOGS_CONFIG_AUTO_MULTI_LINE_DETECTION + value: "false" + - name: DD_HEALTH_PORT + value: "5555" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks endpointschecks" + - name: DD_IGNORE_AUTOCONF + value: "kubernetes_state" + - name: DD_CONTAINER_LIFECYCLE_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_EXPVAR_PORT + value: "6000" + - name: DD_COMPLIANCE_CONFIG_ENABLED + value: "false" + - name: DD_CONTAINER_IMAGE_ENABLED + value: "true" + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW to write to /tmp directory + + - name: os-release-file + mountPath: /host/etc/os-release + readOnly: true + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW to mount to config path + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: false # Need RW to write auth token + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + - name: trace-agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["trace-agent", "-config=/etc/datadog-agent/datadog.yaml"] + resources: + {} + ports: + - containerPort: 8126 + name: traceport + protocol: TCP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_APM_ENABLED + value: "true" + - name: DD_APM_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_APM_RECEIVER_PORT + value: "8126" + - name: DD_APM_RECEIVER_SOCKET + value: "/var/run/datadog/apm.socket" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW for tmp directory + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false # Need RW for UDS DSD socket + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + tcpSocket: + port: 8126 + timeoutSeconds: 5 + - name: process-agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["process-agent", "--cfgpath=/etc/datadog-agent/datadog.yaml"] + resources: + {} + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + + - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED + value: "true" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_SYSTEM_PROBE_ENABLED + value: "false" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: true + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false # Need RW for UDS DSD socket + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW to write to tmp directory + + - name: os-release-file + mountPath: /host/etc/os-release + readOnly: true + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: passwd + mountPath: /etc/passwd + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + initContainers: + + - name: init-volume + + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + readOnly: false # Need RW for config path + resources: + {} + - name: init-config + + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - bash + - -c + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for config path + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + resources: + {} + volumes: + - name: auth-token + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: config + emptyDir: {} + + - name: logdatadog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - hostPath: + path: /proc + name: procdir + - hostPath: + path: /sys/fs/cgroup + name: cgroups + - hostPath: + path: /etc/os-release + name: os-release-file + - hostPath: + path: /var/run/datadog/ + type: DirectoryOrCreate + name: dsdsocket + - hostPath: + path: /var/run/datadog/ + type: DirectoryOrCreate + name: apmsocket + - name: s6-run + emptyDir: {} + - hostPath: + path: /etc/passwd + name: passwd + - hostPath: + path: /var/run + name: runtimesocketdir + tolerations: + affinity: + {} + serviceAccountName: "datadog" + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate \ No newline at end of file diff --git a/test/datadog/baseline/other_default.yaml b/test/datadog/baseline/other_default.yaml new file mode 100644 index 000000000..b78f8c31b --- /dev/null +++ b/test/datadog/baseline/other_default.yaml @@ -0,0 +1,1674 @@ +--- +# Source: datadog/templates/agent-clusterchecks-pdb.yaml +apiVersion: "policy/v1" +kind: PodDisruptionBudget +metadata: + name: datadog-clusterchecks + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +spec: + maxUnavailable: 1 + selector: + matchLabels: + app: datadog-clusterchecks +--- +# Source: datadog/templates/cluster-agent-pdb.yaml +apiVersion: "policy/v1" +kind: PodDisruptionBudget +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +spec: + minAvailable: 1 + selector: + matchLabels: + app: datadog-cluster-agent +--- +# Source: datadog/templates/agent-clusterchecks-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app: "datadog" + chart: "datadog-3.58.2" + heritage: "Helm" + release: "datadog" + name: datadog-cluster-checks + namespace: datadog-agent +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + labels: + app: "datadog" + chart: "datadog-3.58.2" + heritage: "Helm" + release: "datadog" + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-cluster-agent + namespace: datadog-agent +--- +# Source: datadog/templates/rbac.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: datadog + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" # end range $role := .Values.datadog.secretBackend.roles +--- +# Source: datadog/templates/secret-cluster-agent-token.yaml +apiVersion: v1 +kind: Secret +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +type: Opaque +data: + token: "TkNSTkV5UlJqdjhHV0NWcmduMmRBaWdqTUw2WmdsV2g=" +--- +# Source: datadog/templates/cluster-agent-confd-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: datadog-cluster-agent-confd + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + annotations: + checksum/confd-config: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a +data: + kubernetes_state_core.yaml.default: |- + cluster_check: true + init_config: + instances: + - collectors: + - secrets + - configmaps + - nodes + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - namespaces + - endpoints + - daemonsets + - deployments + - replicasets + - statefulsets + - cronjobs + - jobs + - horizontalpodautoscalers + - poddisruptionbudgets + - storageclasses + - volumeattachments + - ingresses + skip_leader_election: true + labels_as_tags: + {} + annotations_as_tags: + {} +--- +# Source: datadog/templates/install_info-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: datadog-installinfo + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + annotations: + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 +data: + install_info: | + --- + install_method: + tool: helm + tool_version: Helm + installer_version: datadog-3.58.2 +--- +# Source: datadog/templates/kpi-telemetry-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: datadog-kpi-telemetry-configmap + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +data: + install_id: "fd55046d-8dfc-47a6-9fb7-20088a93ea58" + install_type: k8s_manual + install_time: "1710950775" +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRole +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-cluster-agent +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + - nodes + - namespaces + - componentstatuses + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: ["quota.openshift.io"] + resources: + - clusterresourcequotas + verbs: + - get + - list +- apiGroups: + - "autoscaling" + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadogtoken # Kubernetes event collection state + - datadogtoken # Kept for backward compatibility with agent <7.37.0 + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - datadog-leader-election # Leader election token + - datadog-leader-election # Kept for backward compatibility with agent <7.37.0 + verbs: + - get + - update +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + resourceNames: + - datadog-leader-election # Leader election token + verbs: + - get + - update +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create +- apiGroups: # To create the leader election token and hpa events + - "" + resources: + - configmaps + - events + verbs: + - create +- nonResourceURLs: + - "/version" + - "/healthz" + verbs: + - get +- apiGroups: # to get the kube-system namespace UID and generate a cluster ID + - "" + resources: + - namespaces + resourceNames: + - "kube-system" + verbs: + - get +- apiGroups: # To create the cluster-id configmap + - "" + resources: + - configmaps + resourceNames: + - "datadog-cluster-id" + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - persistentvolumes + - persistentvolumeclaims + - serviceaccounts + verbs: + - list + - get + - watch +- apiGroups: + - "apps" + resources: + - deployments + - replicasets + - daemonsets + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - "batch" + resources: + - cronjobs + - jobs + verbs: + - list + - get + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + - networkpolicies + verbs: + - list + - get + - watch +- apiGroups: + - "rbac.authorization.k8s.io" + resources: + - roles + - rolebindings + - clusterroles + - clusterrolebindings + verbs: + - list + - get + - watch +- apiGroups: + - autoscaling.k8s.io + resources: + - verticalpodautoscalers + verbs: + - list + - get + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - get + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + resourceNames: + - "datadog-webhook" + verbs: ["get", "list", "watch", "update"] +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: ["create"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get"] +- apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments", "daemonsets"] + verbs: ["get"] +- apiGroups: + - "security.openshift.io" + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - datadog-cluster-agent + - hostnetwork +--- +# Source: datadog/templates/kube-state-metrics-core-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRole +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-ksm-core +rules: +- apiGroups: + - "" + resources: + - secrets + - configmaps + - nodes + - pods + - services + - resourcequotas + - replicationcontrollers + - limitranges + - persistentvolumeclaims + - persistentvolumes + - namespaces + - endpoints + - events + verbs: + - list + - watch +- apiGroups: + - extensions + resources: + - daemonsets + - deployments + - replicasets + verbs: + - list + - watch +- apiGroups: + - apps + resources: + - statefulsets + - daemonsets + - deployments + - replicasets + verbs: + - list + - watch +- apiGroups: + - batch + resources: + - cronjobs + - jobs + verbs: + - list + - watch +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - list + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - list + - watch +- apiGroups: + - storage.k8s.io + resources: + - storageclasses + - volumeattachments + verbs: + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - watch +--- +# Source: datadog/templates/rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRole +metadata: + name: datadog + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +rules: +- nonResourceURLs: + - "/metrics" + - "/metrics/slis" + verbs: + - get +- apiGroups: # Kubelet connectivity + - "" + resources: + - nodes/metrics + - nodes/spec + - nodes/proxy + - nodes/stats + verbs: + - get +- apiGroups: # leader election check + - "" + resources: + - endpoints + verbs: + - get +- apiGroups: + - "security.openshift.io" + resources: + - securitycontextconstraints + verbs: + - use + resourceNames: + - datadog + - hostaccess + - privileged +- apiGroups: # leader election check + - "coordination.k8s.io" + resources: + - leases + verbs: + - get +--- +# Source: datadog/templates/agent-clusterchecks-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-cluster-checks +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog +subjects: + - kind: ServiceAccount + name: datadog-cluster-checks + namespace: datadog-agent +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-cluster-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog-cluster-agent +subjects: + - kind: ServiceAccount + name: datadog-cluster-agent + namespace: datadog-agent +--- +# Source: datadog/templates/kube-state-metrics-core-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-ksm-core +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog-ksm-core +subjects: + - kind: ServiceAccount + name: datadog-cluster-checks + namespace: datadog-agent +--- +# Source: datadog/templates/rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: ClusterRoleBinding +metadata: + name: datadog + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: datadog +subjects: + - kind: ServiceAccount + name: datadog + namespace: datadog-agent +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: Role +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-cluster-agent-main + namespace: datadog-agent +rules: +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "update", "create"] +--- +# Source: datadog/templates/dca-helm-values-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: Role +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-dca-flare + namespace: datadog-agent +rules: +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list +--- +# Source: datadog/templates/cluster-agent-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: RoleBinding +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: "datadog-cluster-agent-main" + namespace: datadog-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: datadog-cluster-agent-main +subjects: + - kind: ServiceAccount + name: datadog-cluster-agent + namespace: datadog-agent +--- +# Source: datadog/templates/dca-helm-values-rbac.yaml +apiVersion: "rbac.authorization.k8s.io/v1" +kind: RoleBinding +metadata: + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + name: datadog-dca-flare + namespace: datadog-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: datadog-dca-flare +subjects: + - kind: ServiceAccount + name: datadog-cluster-agent + namespace: datadog-agent +--- +# Source: datadog/templates/agent-services.yaml +apiVersion: v1 +kind: Service +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +spec: + type: ClusterIP + selector: + app: datadog-cluster-agent + ports: + - port: 5005 + name: agentport + protocol: TCP +--- +# Source: datadog/templates/agent-services.yaml +apiVersion: v1 +kind: Service +metadata: + name: datadog-cluster-agent-admission-controller + namespace: datadog-agent + labels: + app: "datadog" + chart: "datadog-3.58.2" + release: "datadog" + heritage: "Helm" + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +spec: + selector: + app: datadog-cluster-agent + ports: + - port: 443 + targetPort: 8000 + name: datadog-webhook + protocol: TCP +--- +# Source: datadog/templates/agent-services.yaml +apiVersion: v1 +kind: Service + +metadata: + name: datadog + namespace: datadog-agent + labels: + app: "datadog" + chart: "datadog-3.58.2" + release: "datadog" + heritage: "Helm" + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" +spec: + selector: + app: datadog + ports: + - protocol: UDP + port: 8125 + targetPort: 8125 + name: dogstatsdport + - protocol: TCP + port: 8126 + targetPort: 8126 + name: traceport + internalTrafficPolicy: Local +--- +# Source: datadog/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: datadog + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: agent + +spec: + revisionHistoryLimit: 10 + selector: + matchLabels: + app: datadog + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: agent + admission.datadoghq.com/enabled: "false" + app: datadog + + name: datadog + annotations: + checksum/clusteragent_token: d1284848a462e5feb13cfbc55a8d3eb48c477b6916832a592867cb5a4a9e4969 + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + checksum/autoconf-config: 74234e98afe7498fb5daf1f36ac2d78acc339464f950703b8c019892f982b90b + checksum/confd-config: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + checksum/checksd-config: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + spec: + + securityContext: + runAsUser: 0 + hostPID: true + containers: + - name: agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["agent", "run"] + + resources: + {} + ports: + - containerPort: 8125 + name: dogstatsdport + protocol: UDP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_DOGSTATSD_PORT + value: "8125" + - name: DD_DOGSTATSD_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_DOGSTATSD_TAG_CARDINALITY + value: "low" + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_APM_ENABLED + value: "true" + - name: DD_LOGS_ENABLED + value: "false" + - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL + value: "false" + - name: DD_LOGS_CONFIG_K8S_CONTAINER_USE_FILE + value: "true" + - name: DD_LOGS_CONFIG_AUTO_MULTI_LINE_DETECTION + value: "false" + - name: DD_HEALTH_PORT + value: "5555" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "endpointschecks" + + - name: DD_IGNORE_AUTOCONF + value: "kubernetes_state" + - name: DD_CONTAINER_LIFECYCLE_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_EXPVAR_PORT + value: "6000" + - name: DD_COMPLIANCE_CONFIG_ENABLED + value: "false" + - name: DD_CONTAINER_IMAGE_ENABLED + value: "true" + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW to write to /tmp directory + + - name: os-release-file + mountPath: /host/etc/os-release + readOnly: true + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW to mount to config path + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: false # Need RW to write auth token + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5555 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + - name: trace-agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["trace-agent", "-config=/etc/datadog-agent/datadog.yaml"] + resources: + {} + ports: + - containerPort: 8126 + name: traceport + protocol: TCP + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_APM_ENABLED + value: "true" + - name: DD_APM_NON_LOCAL_TRAFFIC + value: "true" + - name: DD_APM_RECEIVER_PORT + value: "8126" + - name: DD_APM_RECEIVER_SOCKET + value: "/var/run/datadog/apm.socket" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW for tmp directory + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false # Need RW for UDS DSD socket + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + livenessProbe: + initialDelaySeconds: 15 + periodSeconds: 15 + tcpSocket: + port: 8126 + timeoutSeconds: 5 + - name: process-agent + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["process-agent", "--cfgpath=/etc/datadog-agent/datadog.yaml"] + resources: + {} + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + + - name: DD_PROCESS_AGENT_DISCOVERY_ENABLED + value: "true" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_SYSTEM_PROBE_ENABLED + value: "false" + - name: DD_DOGSTATSD_SOCKET + value: "/var/run/datadog/dsd.socket" + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: true + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: auth-token + mountPath: /etc/datadog-agent/auth + readOnly: true + - name: dsdsocket + mountPath: /var/run/datadog + readOnly: false # Need RW for UDS DSD socket + - name: tmpdir + mountPath: /tmp + readOnly: false # Need RW to write to tmp directory + + - name: os-release-file + mountPath: /host/etc/os-release + readOnly: true + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + + - name: cgroups + mountPath: /host/sys/fs/cgroup + mountPropagation: None + readOnly: true + - name: passwd + mountPath: /etc/passwd + readOnly: true + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + initContainers: + + - name: init-volume + + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + readOnly: false # Need RW for config path + resources: + {} + - name: init-config + + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - bash + - -c + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: logdatadog + mountPath: /var/log/datadog + readOnly: false # Need RW to write logs + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for config path + - name: procdir + mountPath: /host/proc + mountPropagation: None + readOnly: true + + - name: runtimesocketdir + mountPath: /host/var/run + mountPropagation: None + readOnly: true + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "true" + - name: DD_AUTH_TOKEN_FILE_PATH + value: /etc/datadog-agent/auth/token + + - name: KUBERNETES + value: "yes" + - name: DD_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: DD_OTLP_CONFIG_LOGS_ENABLED + value: "false" + + resources: + {} + volumes: + - name: auth-token + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: config + emptyDir: {} + + - name: logdatadog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - hostPath: + path: /proc + name: procdir + - hostPath: + path: /sys/fs/cgroup + name: cgroups + - hostPath: + path: /etc/os-release + name: os-release-file + - hostPath: + path: /var/run/datadog/ + type: DirectoryOrCreate + name: dsdsocket + - hostPath: + path: /var/run/datadog/ + type: DirectoryOrCreate + name: apmsocket + - name: s6-run + emptyDir: {} + - hostPath: + path: /etc/passwd + name: passwd + - hostPath: + path: /var/run + name: runtimesocketdir + tolerations: + affinity: + {} + serviceAccountName: "datadog" + automountServiceAccountToken: true + nodeSelector: + kubernetes.io/os: linux + updateStrategy: + rollingUpdate: + maxUnavailable: 10% + type: RollingUpdate +--- +# Source: datadog/templates/agent-clusterchecks-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-clusterchecks + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: clusterchecks-agent + +spec: + replicas: 2 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-clusterchecks + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: clusterchecks-agent + admission.datadoghq.com/enabled: "false" + app: datadog-clusterchecks + + name: datadog-clusterchecks + annotations: + checksum/clusteragent_token: d56cd35d2ed589e9817382c6d02cf18a1d24772863e156f83e78a038ee24b51d + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-checks + automountServiceAccountToken: true + imagePullSecrets: + [] + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - cp -r /etc/datadog-agent /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + readOnly: false # Need RW for writing agent config files + resources: + {} + - name: init-config + image: "gcr.io/datadoghq/agent:7.51.0" + imagePullPolicy: IfNotPresent + command: ["bash", "-c"] + args: + - for script in $(find /etc/cont-init.d/ -type f -name '*.sh' | sort) ; do bash $script ; done + volumeMounts: + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for writing datadog.yaml config file + resources: + {} + containers: + - name: agent + image: "gcr.io/datadoghq/agent:7.51.0" + command: ["bash", "-c"] + args: + - rm -rf /etc/datadog-agent/conf.d && touch /etc/datadog-agent/datadog.yaml && exec agent run + imagePullPolicy: IfNotPresent + env: + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + - name: KUBERNETES + value: "yes" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks" + - name: DD_HEALTH_PORT + value: "5557" + # Cluster checks (cluster-agent communication) + - name: DD_CLUSTER_AGENT_ENABLED + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + # Safely run alongside the daemonset + - name: DD_ENABLE_METADATA_COLLECTION + value: "false" + # Expose CLC stats + - name: DD_CLC_RUNNER_ENABLED + value: "true" + - name: DD_CLC_RUNNER_HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: DD_CLC_RUNNER_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + # Remove unused features + - name: DD_USE_DOGSTATSD + value: "false" + - name: DD_PROCESS_AGENT_ENABLED + value: "false" + - name: DD_LOGS_ENABLED + value: "false" + - name: DD_APM_ENABLED + value: "false" + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + + + resources: + {} + volumeMounts: + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: config + mountPath: /etc/datadog-agent + readOnly: false # Need RW for config path + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5557 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5557 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + volumes: + - name: installinfo + configMap: + name: datadog-installinfo + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the runners on different nodes if possible + # for better checks stability in case of node failure. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-clusterchecks + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux +--- +# Source: datadog/templates/cluster-agent-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: datadog-cluster-agent + namespace: datadog-agent + labels: + helm.sh/chart: 'datadog-3.58.2' + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: "7" + app.kubernetes.io/component: cluster-agent + +spec: + replicas: 1 + revisionHistoryLimit: 10 + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: datadog-cluster-agent + template: + metadata: + labels: + app.kubernetes.io/name: "datadog" + app.kubernetes.io/instance: "datadog" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: cluster-agent + admission.datadoghq.com/enabled: "false" + app: datadog-cluster-agent + + name: datadog-cluster-agent + annotations: + checksum/clusteragent_token: 9f86485d7f07627ccbd29f3ad3996ceb1fa57403f055c1b15e7c6919f944a2c1 + checksum/clusteragent-configmap: 4bd988547c8c6cc1f177d466bee234b3d51f96bd8f1177244bd0cca5cdea8082 + checksum/install_info: 3c5d7a2732f453d72b241f37b74f59319bcbf51e387a8fc35dc47bc4a1a7a390 + spec: + serviceAccountName: datadog-cluster-agent + automountServiceAccountToken: true + initContainers: + - name: init-volume + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + command: + - cp + - -r + args: + - /etc/datadog-agent + - /opt + volumeMounts: + - name: config + mountPath: /opt/datadog-agent + containers: + - name: cluster-agent + image: "gcr.io/datadoghq/cluster-agent:7.51.0" + imagePullPolicy: IfNotPresent + resources: + {} + ports: + - containerPort: 5005 + name: agentport + protocol: TCP + - containerPort: 5000 + name: agentmetrics + protocol: TCP + - containerPort: 8000 + name: datadog-webhook + protocol: TCP + env: + - name: DD_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DD_HEALTH_PORT + value: "5556" + - name: DD_API_KEY + valueFrom: + secretKeyRef: + name: "datadog-secret" + key: api-key + optional: true + + - name: KUBERNETES + value: "yes" + - name: DD_ADMISSION_CONTROLLER_ENABLED + value: "true" + - name: DD_ADMISSION_CONTROLLER_WEBHOOK_NAME + value: "datadog-webhook" + - name: DD_ADMISSION_CONTROLLER_MUTATE_UNLABELLED + value: "false" + - name: DD_ADMISSION_CONTROLLER_SERVICE_NAME + value: datadog-cluster-agent-admission-controller + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_MODE + value: socket + - name: DD_ADMISSION_CONTROLLER_INJECT_CONFIG_LOCAL_SERVICE_NAME + value: datadog + - name: DD_ADMISSION_CONTROLLER_FAILURE_POLICY + value: "Ignore" + - name: DD_ADMISSION_CONTROLLER_PORT + value: "8000" + + + - name: DD_REMOTE_CONFIGURATION_ENABLED + value: "false" + - name: DD_CLUSTER_CHECKS_ENABLED + value: "true" + - name: DD_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: DD_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + - name: DD_LOG_LEVEL + value: "INFO" + - name: DD_LEADER_ELECTION + value: "true" + - name: DD_LEADER_ELECTION_DEFAULT_RESOURCE + value: "configmap" + - name: DD_LEADER_LEASE_DURATION + value: "15" + - name: DD_LEADER_LEASE_NAME + value: datadog-leader-election + - name: DD_CLUSTER_AGENT_TOKEN_NAME + value: datadogtoken + - name: DD_COLLECT_KUBERNETES_EVENTS + value: "true" + - name: DD_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: datadog-cluster-agent + - name: DD_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: datadog-cluster-agent + key: token + - name: DD_CLUSTER_AGENT_COLLECT_KUBERNETES_TAGS + value: "false" + - name: DD_KUBE_RESOURCES_NAMESPACE + value: datadog-agent + - name: CHART_RELEASE_NAME + value: "datadog" + - name: AGENT_DAEMONSET + value: datadog + - name: CLUSTER_AGENT_DEPLOYMENT + value: datadog-cluster-agent + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "true" + - name: DD_ORCHESTRATOR_EXPLORER_CONTAINER_SCRUBBING_ENABLED + value: "true" + - name: DD_INSTRUMENTATION_INSTALL_TIME + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_time + - name: DD_INSTRUMENTATION_INSTALL_ID + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_id + - name: DD_INSTRUMENTATION_INSTALL_TYPE + valueFrom: + configMapKeyRef: + name: datadog-kpi-telemetry-configmap + key: install_type + + livenessProbe: + failureThreshold: 6 + httpGet: + path: /live + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 6 + httpGet: + path: /ready + port: 5556 + scheme: HTTP + initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 5 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - name: datadogrun + mountPath: /opt/datadog-agent/run + readOnly: false + - name: varlog + mountPath: /var/log/datadog + readOnly: false + - name: tmpdir + mountPath: /tmp + readOnly: false + - name: installinfo + subPath: install_info + mountPath: /etc/datadog-agent/install_info + readOnly: true + - name: confd + mountPath: /conf.d + readOnly: true + - name: config + mountPath: /etc/datadog-agent + volumes: + - name: datadogrun + emptyDir: {} + - name: varlog + emptyDir: {} + - name: tmpdir + emptyDir: {} + - name: installinfo + configMap: + name: datadog-installinfo + - name: confd + configMap: + name: datadog-cluster-agent-confd + items: + - key: kubernetes_state_core.yaml.default + path: kubernetes_state_core.yaml.default + - name: config + emptyDir: {} + affinity: + # Prefer scheduling the cluster agents on different nodes + # to guarantee that the standby instance can immediately take the lead from a leader running of a faulty node. + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 50 + podAffinityTerm: + labelSelector: + matchLabels: + app: datadog-cluster-agent + topologyKey: kubernetes.io/hostname + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/datadog/baseline_test.go b/test/datadog/baseline_test.go new file mode 100644 index 000000000..8118d5128 --- /dev/null +++ b/test/datadog/baseline_test.go @@ -0,0 +1,195 @@ +package datadog + +import ( + "bufio" + "io" + "strings" + "testing" + + "github.com/DataDog/helm-charts/test/common" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func Test_baseline_manifests(t *testing.T) { + tests := []struct { + name string + command common.HelmCommand + baselineManifestPath string + assertions func(t *testing.T, baselineManifestPath, manifest string) + }{ + { + name: "Daemonset default", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/daemonset.yaml"}, + Values: []string{"../../charts/datadog/values.yaml"}, + Overrides: map[string]string{ + "datadog.apiKeyExistingSecret": "datadog-secret", + "datadog.appKeyExistingSecret": "datadog-secret", + }, + }, + baselineManifestPath: "./baseline/daemonset_default.yaml", + assertions: verifyDaemonset, + }, + { + name: "DCA Deployment default", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/cluster-agent-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml"}, + Overrides: map[string]string{}, + }, + baselineManifestPath: "./baseline/cluster-agent-deployment_default.yaml", + assertions: verifyDeployment, + }, + { + name: "DCA Deployment default with minimal AC sidecar injection", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/cluster-agent-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml", + "./manifests/dca_AC_sidecar_fargateMinimal.yaml"}, + Overrides: map[string]string{}, + }, + baselineManifestPath: "./baseline/cluster-agent-deployment_default_minimal_AC_injection.yaml", + assertions: verifyDeployment, + }, + { + name: "DCA Deployment default with advanced AC sidecar injection", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/cluster-agent-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml", + "./manifests/dca_AC_sidecar_advanced.yaml"}, + Overrides: map[string]string{}, + }, + baselineManifestPath: "./baseline/cluster-agent-deployment_default_advanced_AC_injection.yaml", + assertions: verifyDeployment, + }, + { + name: "CLC Deployment default", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/agent-clusterchecks-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml"}, + Overrides: map[string]string{ + "datadog.apiKeyExistingSecret": "datadog-secret", + "datadog.appKeyExistingSecret": "datadog-secret", + "datadog.kubeStateMetricsCore.useClusterCheckRunners": "true", + "datadog.clusterChecks.enabled": "true", + "clusterChecksRunner.enabled": "true", + }}, + baselineManifestPath: "./baseline/agent-clusterchecks-deployment_default.yaml", + assertions: verifyDeployment, + }, + { + name: "Other resources, skips Deployment, DaemonSet, Secret; creates PDBs", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{}, + Values: []string{"../../charts/datadog/values.yaml"}, + Overrides: map[string]string{ + "datadog.apiKeyExistingSecret": "datadog-secret", + "datadog.appKeyExistingSecret": "datadog-secret", + "datadog.kubeStateMetricsCore.useClusterCheckRunners": "true", + "datadog.clusterChecks.enabled": "true", + "clusterChecksRunner.enabled": "true", + // Create PDB for DCA and CLC + "clusterAgent.createPodDisruptionBudget": "true", + "clusterChecksRunner.createPodDisruptionBudget": "true", + }}, + baselineManifestPath: "./baseline/other_default.yaml", + assertions: verifyUntypedResources, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + manifest, err := common.RenderChart(t, tt.command) + assert.Nil(t, err, "couldn't render template") + t.Log("update baselines", common.UpdateBaselines) + if common.UpdateBaselines { + common.WriteToFile(t, tt.baselineManifestPath, manifest) + } + tt.assertions(t, tt.baselineManifestPath, manifest) + }) + } +} + +func verifyDaemonset(t *testing.T, baselineManifestPath, manifest string) { + verifyBaseline(t, baselineManifestPath, manifest, appsv1.DaemonSet{}, appsv1.DaemonSet{}) +} + +func verifyDeployment(t *testing.T, baselineManifestPath, manifest string) { + verifyBaseline(t, baselineManifestPath, manifest, appsv1.Deployment{}, appsv1.Deployment{}) +} + +func verifyBaseline[T any](t *testing.T, baselineManifestPath, manifest string, baseline, actual T) { + common.Unmarshal(t, manifest, &actual) + common.LoadFromFile(t, baselineManifestPath, &baseline) + + // Exclude + // - "helm.sh/chart" label + // - checksum annotations + // - Image + // to avoid frequent baseline update and CI failures. + ops := make(cmp.Options, 0) + ops = append(ops, cmpopts.IgnoreMapEntries(func(k, v string) bool { + return k == "helm.sh/chart" || k == "checksum/clusteragent_token" || strings.Contains(k, "checksum") + })) + ops = append(ops, cmpopts.IgnoreFields(corev1.Container{}, "Image")) + + assert.True(t, cmp.Equal(baseline, actual, ops), cmp.Diff(baseline, actual)) +} + +func verifyUntypedResources(t *testing.T, baselineManifestPath, actual string) { + baselineManifest := common.ReadFile(t, baselineManifestPath) + + rB := bufio.NewReader(strings.NewReader(baselineManifest)) + baselineReader := yaml.NewYAMLReader(rB) + rA := bufio.NewReader(strings.NewReader(actual)) + expectedReader := yaml.NewYAMLReader(rA) + + for { + baselineResource, errB := baselineReader.Read() + actualResource, errA := expectedReader.Read() + if errB == io.EOF || errA == io.EOF { + break + } + require.NoError(t, errB, "couldn't read resource from manifest", baselineManifest) + require.NoError(t, errA, "couldn't read resource from manifest", actual) + + // unmarshal as map since this can be any resource + var expected, actual map[string]interface{} + yaml.Unmarshal(baselineResource, &expected) + yaml.Unmarshal(actualResource, &actual) + + assert.Equal(t, expected["kind"], actual["kind"]) + kind := expected["kind"] + if kind == "Deployment" || kind == "DaemonSet" || kind == "Secret" { + continue + } + + ops := make(cmp.Options, 0) + ops = append(ops, cmpopts.IgnoreMapEntries(func(k string, v any) bool { + // skip these as these change frequently + t.Log(k, v) + return k == "helm.sh/chart" || k == "token" || strings.Contains(k, "checksum") || + k == "Image" || k == "install_id" || k == "install_time" + })) + + assert.True(t, cmp.Equal(expected, actual, ops), cmp.Diff(expected, actual)) + } +} diff --git a/test/datadog/dca_AC_sidecar_test.go b/test/datadog/dca_AC_sidecar_test.go new file mode 100644 index 000000000..91fda3b03 --- /dev/null +++ b/test/datadog/dca_AC_sidecar_test.go @@ -0,0 +1,162 @@ +package datadog + +import ( + "encoding/json" + "testing" + + "github.com/DataDog/helm-charts/test/common" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DDSidecarEnabled = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_ENABLED" + DDSidecarClusterAgentEnabled = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CLUSTER_AGENT_ENABLED" + DDSidecarProvider = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROVIDER" + DDSidecarRegistry = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_CONTAINER_REGISTRY" + DDSidecarImageName = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_NAME" + DDSidecarImageTag = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_IMAGE_TAG" + DDSidecarSelectors = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_SELECTORS" + DDSidecarProfiles = "DD_ADMISSION_CONTROLLER_AGENT_SIDECAR_PROFILES" +) + +func Test_admissionControllerConfig(t *testing.T) { + tests := []struct { + name string + command common.HelmCommand + assertions func(t *testing.T, manifest string) + }{ + { + name: "AC sidecar injection, minimal Fargate config", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/cluster-agent-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml", + "./manifests/dca_AC_sidecar_fargateMinimal.yaml"}, + Overrides: map[string]string{ + // "clusterAgent.admissionController.enabled": "true", + // "clusterAgent.admissionController.agentSidecarInjection.enabled": "true", + }, + }, + assertions: verifyDeploymentFargateMinimal, + }, + { + name: "AC sidecar injection, advanced config", + command: common.HelmCommand{ + ReleaseName: "datadog", + ChartPath: "../../charts/datadog", + ShowOnly: []string{"templates/cluster-agent-deployment.yaml"}, + Values: []string{"../../charts/datadog/values.yaml", + "./manifests/dca_AC_sidecar_advanced.yaml"}, + Overrides: map[string]string{}, + }, + assertions: verifyDeploymentAdvancedConfig, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + manifest, err := common.RenderChart(t, tt.command) + assert.Nil(t, err, "couldn't render template") + tt.assertions(t, manifest) + }) + } +} + +// V1 structs are for the current scope +type Selector struct { + ObjectSelector metav1.LabelSelector `json:"objectSelector,omitempty"` + NamespaceSelector metav1.LabelSelector `json:"namespaceSelector,omitempty"` +} + +type ProfileOverride struct { + EnvVars []corev1.EnvVar `json:"env,omitempty"` + ResourceRequirements corev1.ResourceRequirements `json:"resources,omitempty"` +} + +func verifyDeploymentFargateMinimal(t *testing.T, manifest string) { + var deployment appsv1.Deployment + common.Unmarshal(t, manifest, &deployment) + dcaContainer := deployment.Spec.Template.Spec.Containers[0] + + acConfigEnv := selectEnvVars(dcaContainer.Env) + + assert.Equal(t, "true", acConfigEnv[DDSidecarEnabled]) + assert.Equal(t, "true", acConfigEnv[DDSidecarClusterAgentEnabled]) + assert.Equal(t, "fargate", acConfigEnv[DDSidecarProvider]) + // Default will be set by DCA + assert.Empty(t, acConfigEnv[DDSidecarRegistry]) + assert.Equal(t, "agent", acConfigEnv[DDSidecarImageName]) + assert.Equal(t, "7.51.0", acConfigEnv[DDSidecarImageTag]) + assert.Empty(t, acConfigEnv[DDSidecarSelectors]) + assert.Empty(t, acConfigEnv[DDSidecarProfiles]) +} + +func verifyDeploymentAdvancedConfig(t *testing.T, manifest string) { + var deployment appsv1.Deployment + common.Unmarshal(t, manifest, &deployment) + dcaContainer := deployment.Spec.Template.Spec.Containers[0] + + acConfigEnv := selectEnvVars(dcaContainer.Env) + + assert.Equal(t, "true", acConfigEnv[DDSidecarEnabled]) + assert.Equal(t, "false", acConfigEnv[DDSidecarClusterAgentEnabled]) + assert.Empty(t, acConfigEnv[DDSidecarProvider]) + assert.Equal(t, "gcr.io/datadoghq", acConfigEnv[DDSidecarRegistry]) + assert.Equal(t, "agent", acConfigEnv[DDSidecarImageName]) + assert.Equal(t, "7.52.0", acConfigEnv[DDSidecarImageTag]) + assert.NotEmpty(t, acConfigEnv[DDSidecarSelectors]) + assert.NotEmpty(t, acConfigEnv[DDSidecarProfiles]) + + selectorsAsString := acConfigEnv[DDSidecarSelectors] + profilesAsString := acConfigEnv[DDSidecarProfiles] + + var selectors []Selector + err := json.Unmarshal([]byte(selectorsAsString), &selectors) + assert.Nil(t, err) + selector := selectors[0] + assert.Equal(t, "nodeless", selector.ObjectSelector.MatchLabels["runsOn"]) + assert.Equal(t, "nginx", selector.ObjectSelector.MatchLabels["app"]) + assert.Equal(t, "true", selector.NamespaceSelector.MatchLabels["agentSidecars"]) + + var profiles []ProfileOverride + err = json.Unmarshal([]byte(profilesAsString), &profiles) + assert.Nil(t, err) + profile := profiles[0] + assert.Equal(t, "DD_ORCHESTRATOR_EXPLORER_ENABLED", profile.EnvVars[0].Name) + assert.Equal(t, "false", profile.EnvVars[0].Value) + assert.Equal(t, "DD_TAGS", profile.EnvVars[1].Name) + // Agent expects space-separated pairs + assert.Equal(t, "key1:value1 key2:value2", profile.EnvVars[1].Value) + assert.Equal(t, "1", profile.ResourceRequirements.Requests.Cpu().String()) + assert.Equal(t, "512Mi", profile.ResourceRequirements.Requests.Memory().String()) + assert.Equal(t, "2", profile.ResourceRequirements.Limits.Cpu().String()) + assert.Equal(t, "1Gi", profile.ResourceRequirements.Limits.Memory().String()) +} + +func selectEnvVars(envVars []corev1.EnvVar) map[string]string { + acConfoigNames := []string{ + DDSidecarEnabled, + DDSidecarClusterAgentEnabled, + DDSidecarProvider, + DDSidecarRegistry, + DDSidecarImageName, + DDSidecarImageTag, + DDSidecarSelectors, + DDSidecarProfiles, + } + + selection := map[string]string{} + + for _, envVar := range envVars { + for _, name := range acConfoigNames { + if envVar.Name == name { + selection[name] = envVar.Value + } + } + } + return selection +} diff --git a/test/datadog/manifests/dca_AC_sidecar_advanced.yaml b/test/datadog/manifests/dca_AC_sidecar_advanced.yaml new file mode 100644 index 000000000..10404420b --- /dev/null +++ b/test/datadog/manifests/dca_AC_sidecar_advanced.yaml @@ -0,0 +1,31 @@ +clusterAgent: + enabled: true + admissionController: + enabled: true + agentSidecarInjection: + enabled: true + clusterAgentCommunicationEnabled: false + containerRegistry: gcr.io/datadoghq + imageName: agent + imageTag: 7.52.0 + selectors: + - objectSelector: + matchLabels: + "runsOn": nodeless + "app": nginx + namespaceSelector: + matchLabels: + agentSidecars: "true" + profiles: + - env: + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: "false" + - name: DD_TAGS + value: "key1:value1 key2:value2" + resources: + requests: + cpu: "1" + memory: "512Mi" + limits: + cpu: "2" + memory: "1024Mi" \ No newline at end of file diff --git a/test/datadog/manifests/dca_AC_sidecar_fargateMinimal.yaml b/test/datadog/manifests/dca_AC_sidecar_fargateMinimal.yaml new file mode 100644 index 000000000..9dbd3a20a --- /dev/null +++ b/test/datadog/manifests/dca_AC_sidecar_fargateMinimal.yaml @@ -0,0 +1,8 @@ +clusterAgent: + enabled: true + admissionController: + enabled: true + clusterAgentCommunicationEnabled: false + agentSidecarInjection: + enabled: true + provider: fargate diff --git a/test/datadog/testmain_test.go b/test/datadog/testmain_test.go new file mode 100644 index 000000000..8610597a8 --- /dev/null +++ b/test/datadog/testmain_test.go @@ -0,0 +1,13 @@ +package datadog + +import ( + "os" + "testing" + + "github.com/DataDog/helm-charts/test/common" +) + +func TestMain(m *testing.M) { + common.ParseArgs() + os.Exit(m.Run()) +}