From 00ad82054ee0047edfc03a1b97b923b7752f42f1 Mon Sep 17 00:00:00 2001 From: sridhar Date: Mon, 11 Sep 2023 14:54:18 -0700 Subject: [PATCH 1/3] operator changes for log collector, cluster connection --- config/calico_versions.yml | 5 + hack/gen-versions/calico.go.tpl | 16 + hack/gen-versions/components.go | 2 + hack/gen-versions/enterprise.go.tpl | 8 +- .../crd.projectcalico.org/v1/felixconfig.go | 15 + .../v1/zz_generated.deepcopy.go | 25 + pkg/components/calico.go | 14 + pkg/components/enterprise.go | 8 +- pkg/components/references.go | 3 +- .../clusterconnection_controller.go | 145 +++--- .../clusterconnection_controller_test.go | 135 ++++++ .../logcollector/logcollector_controller.go | 447 ++++++++++-------- .../logcollector_controller_test.go | 176 ++++++- .../managed_cluster_controller.go | 65 ++- .../managed_cluster_controller_test.go | 59 +++ pkg/controller/utils/discovery.go | 4 - ...ojectcalico.org_globalnetworkpolicies.yaml | 13 + ...crd.projectcalico.org_networkpolicies.yaml | 13 + ...ojectcalico.org_securityeventwebhooks.yaml | 181 +++++++ pkg/render/apiserver.go | 8 +- pkg/render/fluentd.go | 105 ++-- pkg/render/fluentd_test.go | 94 ++++ pkg/render/guardian.go | 19 +- pkg/render/guardian_test.go | 88 +++- .../kubecontrollers/kube-controllers.go | 22 +- pkg/render/logstorage.go | 5 +- pkg/render/logstorage_test.go | 46 +- pkg/render/node.go | 25 +- pkg/render/node_test.go | 15 + pkg/render/typha.go | 5 + 30 files changed, 1365 insertions(+), 401 deletions(-) create mode 100644 pkg/crds/enterprise/crd.projectcalico.org_securityeventwebhooks.yaml diff --git a/config/calico_versions.yml b/config/calico_versions.yml index e778dadcce..8b156ecf74 100644 --- a/config/calico_versions.yml +++ b/config/calico_versions.yml @@ -21,3 +21,8 @@ components: version: master csi-node-driver-registrar: version: master + calico/fluentd: + version: master + calico/guardian: + version: master + image: tigera/guardian diff --git a/hack/gen-versions/calico.go.tpl b/hack/gen-versions/calico.go.tpl index affffd5a9d..41cebbe1a1 100644 --- a/hack/gen-versions/calico.go.tpl +++ b/hack/gen-versions/calico.go.tpl @@ -126,12 +126,26 @@ var ( Registry: "{{ .Registry }}", } {{- end }} +{{ with index .Components "calico/fluentd"}} + ComponentCalicoFluentd = component{ + Version: "{{ .Version }}", + Image: "{{ .Image }}", + Registry: "{{ .Registry }}", + } +{{- end }} {{ with index .Components "csi-node-driver-registrar"}} ComponentCalicoCSIRegistrarFIPS = component{ Version: "{{ .Version }}-fips", Image: "{{ .Image }}", Registry: "{{ .Registry }}", } +{{- end }} +{{ with index .Components "calico/guardian" }} + ComponentCalicoGuardian = component{ + Version: "{{ .Version }}", + Image: "{{ .Image }}", + Registry: "{{ .Registry }}", + } {{- end }} ComponentOperatorInit = component{ Version: version.VERSION, @@ -155,6 +169,8 @@ var ( ComponentCalicoCSI, ComponentCalicoCSIFIPS, ComponentCalicoCSIRegistrar, + ComponentCalicoFluentd, ComponentCalicoCSIRegistrarFIPS, + ComponentCalicoGuardian, } ) diff --git a/hack/gen-versions/components.go b/hack/gen-versions/components.go index 9f9ff311bb..2cf2a890fd 100644 --- a/hack/gen-versions/components.go +++ b/hack/gen-versions/components.go @@ -44,6 +44,8 @@ var defaultImages = map[string]string{ "calico/apiserver": "calico/apiserver", "calico/windows-upgrade": "calico/windows-upgrade", "tigera/linseed": "tigera/linseed", + "calico/fluentd": "calico/fluentd", + "calico/guardian": "calico/guardian", } var ignoredImages = map[string]struct{}{ diff --git 
a/hack/gen-versions/enterprise.go.tpl b/hack/gen-versions/enterprise.go.tpl index 7fb6889709..7de16f4b05 100644 --- a/hack/gen-versions/enterprise.go.tpl +++ b/hack/gen-versions/enterprise.go.tpl @@ -143,14 +143,14 @@ var ( } {{- end }} {{ with .Components.fluentd }} - ComponentFluentd = component{ + ComponentTigeraFluentd = component{ Version: "{{ .Version }}", Image: "{{ .Image }}", Registry: "{{ .Registry }}", } {{- end }} {{ with index .Components "fluentd-windows" }} - ComponentFluentdWindows = component{ + ComponentTigeraFluentdWindows = component{ Version: "{{ .Version }}", Image: "{{ .Image }}", Registry: "{{ .Registry }}", } {{- end }} @@ -387,8 +387,8 @@ var ( ComponentElasticsearchOperator, ComponentEsCurator, ComponentEsProxy, - ComponentFluentd, - ComponentFluentdWindows, + ComponentTigeraFluentd, + ComponentTigeraFluentdWindows, ComponentGuardian, ComponentIntrusionDetectionController, ComponentAnomalyDetectionJobs, diff --git a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go index 1bb04d0993..28a8abd5d3 100644 --- a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go +++ b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go @@ -387,6 +387,21 @@ type FelixConfigurationSpec struct { // `[fd00:83a6::12]:5353`.Note that Felix (calico-node) will need RBAC permission to read the details of // each service specified by a `k8s-service:...` form. [Default: "k8s-service:kube-dns"]. DNSTrustedServers *[]string `json:"dnsTrustedServers,omitempty"` + + // FlowLogsFileEnabled controls whether flow logs are enabled. [Default: false] + FlowLogsFileEnabled *bool `json:"flowLogsFileEnabled,omitempty"` + + // FlowLogsFileIncludeService controls whether service information is included in the flow logs. [Default: false] + FlowLogsFileIncludeService *bool `json:"flowLogsFileIncludeService,omitempty"` + + // FlowLogsFileIncludePolicies controls whether policy information is included in the flow logs. [Default: false] + FlowLogsFileIncludePolicies *bool `json:"flowLogsFileIncludePolicies,omitempty"` + + // FlowLogsEnableHostEndpoint enables flow logs reporting for host endpoints. [Default: false] + FlowLogsEnableHostEndpoint *bool `json:"flowLogsEnableHostEndpoint,omitempty"` + + // FlowLogsEnableNetworkSets enables flow logs reporting for global network sets.
[Default: false] + FlowLogsEnableNetworkSets *bool `json:"flowLogsEnableNetworkSets,omitempty"` } type RouteTableRange struct { diff --git a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go index 335ac34d4c..a7f8193165 100644 --- a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go +++ b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go @@ -698,6 +698,31 @@ func (in *FelixConfigurationSpec) DeepCopyInto(out *FelixConfigurationSpec) { copy(*out, *in) } } + if in.FlowLogsFileEnabled != nil { + in, out := &in.FlowLogsFileEnabled, &out.FlowLogsFileEnabled + *out = new(bool) + **out = **in + } + if in.FlowLogsFileIncludeService != nil { + in, out := &in.FlowLogsFileIncludeService, &out.FlowLogsFileIncludeService + *out = new(bool) + **out = **in + } + if in.FlowLogsFileIncludePolicies != nil { + in, out := &in.FlowLogsFileIncludePolicies, &out.FlowLogsFileIncludePolicies + *out = new(bool) + **out = **in + } + if in.FlowLogsEnableHostEndpoint != nil { + in, out := &in.FlowLogsEnableHostEndpoint, &out.FlowLogsEnableHostEndpoint + *out = new(bool) + **out = **in + } + if in.FlowLogsEnableNetworkSets != nil { + in, out := &in.FlowLogsEnableNetworkSets, &out.FlowLogsEnableNetworkSets + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfigurationSpec. diff --git a/pkg/components/calico.go b/pkg/components/calico.go index f82e8f2a66..21b3d24559 100644 --- a/pkg/components/calico.go +++ b/pkg/components/calico.go @@ -112,11 +112,23 @@ var ( Registry: "", } + ComponentCalicoFluentd = component{ + Version: "master", + Image: "calico/fluentd", + Registry: "", + } + ComponentCalicoCSIRegistrarFIPS = component{ Version: "master-fips", Image: "calico/node-driver-registrar", Registry: "", } + + ComponentCalicoGuardian = component{ + Version: "master", + Image: "tigera/guardian", + Registry: "", + } ComponentOperatorInit = component{ Version: version.VERSION, Image: "tigera/operator", @@ -139,6 +151,8 @@ var ( ComponentCalicoCSI, ComponentCalicoCSIFIPS, ComponentCalicoCSIRegistrar, + ComponentCalicoFluentd, ComponentCalicoCSIRegistrarFIPS, + ComponentCalicoGuardian, } ) diff --git a/pkg/components/enterprise.go b/pkg/components/enterprise.go index b229a8c674..1db3b8d0a9 100644 --- a/pkg/components/enterprise.go +++ b/pkg/components/enterprise.go @@ -125,13 +125,13 @@ var ( Registry: "", } - ComponentFluentd = component{ + ComponentTigeraFluentd = component{ Version: "master", Image: "tigera/fluentd", Registry: "", } - ComponentFluentdWindows = component{ + ComponentTigeraFluentdWindows = component{ Version: "master", Image: "tigera/fluentd-windows", Registry: "", @@ -336,8 +336,8 @@ var ( ComponentElasticsearchOperator, ComponentEsCurator, ComponentEsProxy, - ComponentFluentd, - ComponentFluentdWindows, + ComponentTigeraFluentd, + ComponentTigeraFluentdWindows, ComponentGuardian, ComponentIntrusionDetectionController, ComponentAnomalyDetectionJobs, diff --git a/pkg/components/references.go b/pkg/components/references.go index c33816fcc6..b1d9b521e5 100644 --- a/pkg/components/references.go +++ b/pkg/components/references.go @@ -53,7 +53,8 @@ func GetReference(c component, registry, imagePath, imagePrefix string, is *oper ComponentCalicoCSI, ComponentCalicoCSIFIPS, ComponentCalicoCSIRegistrar, - ComponentCalicoCSIRegistrarFIPS: + ComponentCalicoCSIRegistrarFIPS, + ComponentCalicoFluentd: registry = CalicoRegistry case ComponentOperatorInit: 
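[Illustrative sketch, not part of the patch.] With the new case added to GetReference above, ComponentCalicoFluentd now resolves against CalicoRegistry like the other calico/* images; note that ComponentCalicoGuardian is not added to that case and so falls through to the default registry handling. A minimal usage sketch, assuming GetReference returns (string, error) as its existing callers in this repo suggest:

    package main

    import (
    	"fmt"

    	"github.com/tigera/operator/pkg/components"
    )

    func main() {
    	// Empty registry/imagePath/imagePrefix and a nil ImageSet, so the
    	// component falls back to its default (Calico) registry.
    	ref, err := components.GetReference(components.ComponentCalicoFluentd, "", "", "", nil)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ref) // expected: <calico registry>/calico/fluentd:master
    }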
diff --git a/pkg/controller/clusterconnection/clusterconnection_controller.go b/pkg/controller/clusterconnection/clusterconnection_controller.go index 855361fcd3..5dbb308c18 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller.go @@ -60,10 +60,6 @@ var log = logf.Log.WithName(controllerName) // Add creates a new ManagementClusterConnection Controller and adds it to the Manager. The Manager will set fields on the Controller // and start it when the Manager is started. This controller is meant only for enterprise users. func Add(mgr manager.Manager, opts options.AddOptions) error { - if !opts.EnterpriseCRDExists { - // No need to start this controller. - return nil - } statusManager := status.New(mgr.GetClient(), "management-cluster-connection", opts.KubernetesVersion) // Create the reconciler @@ -76,22 +72,24 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("failed to create %s: %w", controllerName, err) } - k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - log.Error(err, "Failed to establish a connection to k8s") - return err - } + if opts.EnterpriseCRDExists { + k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + log.Error(err, "Failed to establish a connection to k8s") + return err + } - // Watch for changes to License and Tier, as their status is used as input to determine whether network policy should be reconciled by this controller. - go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, nil) - go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) + // Watch for changes to License and Tier, as their status is used as input to determine whether network policy should be reconciled by this controller. 
+ go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, nil) + go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) - go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{ - {Name: render.GuardianPolicyName, Namespace: render.GuardianNamespace}, - {Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: render.GuardianNamespace}, - }) + go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{ + {Name: render.GuardianPolicyName, Namespace: render.GuardianNamespace}, + {Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: render.GuardianNamespace}, + }) + } - return add(mgr, controller) + return add(mgr, controller, opts.EnterpriseCRDExists) } // newReconciler returns a new reconcile.Reconciler @@ -117,15 +115,10 @@ func newReconciler( } // add adds a new controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, c controller.Controller) error { - // Watch for changes to primary resource ManagementCluster - err := c.Watch(&source.Kind{Type: &operatorv1.ManagementCluster{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return fmt.Errorf("%s failed to watch primary resource: %w", controllerName, err) - } +func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool) error { // Watch for changes to primary resource ManagementClusterConnection - err = c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, &handler.EnqueueRequestForObject{}) + err := c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, &handler.EnqueueRequestForObject{}) if err != nil { return fmt.Errorf("%s failed to watch primary resource: %w", controllerName, err) } @@ -135,13 +128,20 @@ func add(mgr manager.Manager, c controller.Controller) error { return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, render.GuardianSecretName, err) } - // Watch for changes to the secrets associated with the PacketCapture APIs. - if err = utils.AddSecretsWatch(c, render.PacketCaptureServerCert, common.OperatorNamespace()); err != nil { - return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, render.PacketCaptureServerCert, err) - } - // Watch for changes to the secrets associated with Prometheus. - if err = utils.AddSecretsWatch(c, monitor.PrometheusTLSSecretName, common.OperatorNamespace()); err != nil { - return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, monitor.PrometheusTLSSecretName, err) + if enterpriseCRDExists { + // Watch for changes to primary resource ManagementCluster + err := c.Watch(&source.Kind{Type: &operatorv1.ManagementCluster{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("%s failed to watch primary resource: %w", controllerName, err) + } + // Watch for changes to the secrets associated with the PacketCapture APIs. + if err = utils.AddSecretsWatch(c, render.PacketCaptureServerCert, common.OperatorNamespace()); err != nil { + return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, render.PacketCaptureServerCert, err) + } + // Watch for changes to the secrets associated with Prometheus. 
+ if err = utils.AddSecretsWatch(c, monitor.PrometheusTLSSecretName, common.OperatorNamespace()); err != nil { + return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, monitor.PrometheusTLSSecretName, err) + } } if err = utils.AddSecretsWatch(c, certificatemanagement.CASecretName, common.OperatorNamespace()); err != nil { @@ -192,10 +192,13 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R return result, err } - managementCluster, err := utils.GetManagementCluster(ctx, r.Client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) - return reconcile.Result{}, err + var managementCluster *operatorv1.ManagementCluster + if variant == operatorv1.TigeraSecureEnterprise { + managementCluster, err = utils.GetManagementCluster(ctx, r.Client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) + return reconcile.Result{}, err + } } // Fetch the managementClusterConnection. @@ -278,7 +281,13 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R trustedCertBundle = certificateManager.CreateTrustedBundle() } - for _, secretName := range []string{render.PacketCaptureServerCert, monitor.PrometheusTLSSecretName, render.ProjectCalicoAPIServerTLSSecretName(instl.Variant)} { + var certs []string + if variant == operatorv1.TigeraSecureEnterprise { + certs = []string{render.PacketCaptureServerCert, monitor.PrometheusTLSSecretName, render.ProjectCalicoAPIServerTLSSecretName(instl.Variant)} + } else { + certs = []string{render.ProjectCalicoAPIServerTLSSecretName(instl.Variant)} + } + for _, secretName := range certs { secret, err := certificateManager.GetCertificate(r.Client, secretName, common.OperatorNamespace()) if err != nil { r.status.SetDegraded(operatorv1.ResourceReadError, fmt.Sprintf("Failed to retrieve %s", secretName), err, reqLogger) @@ -291,42 +300,44 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R trustedCertBundle.AddCertificates(secret) } - // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. - if !r.tierWatchReady.IsReady() { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - - // Ensure the allow-tigera tier exists, before rendering any network policies within it. includeV3NetworkPolicy := false - if err := r.Client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { - // The creation of the Tier depends on this controller to reconcile it's non-NetworkPolicy resources so that the - // License becomes available. Therefore, if we fail to query the Tier, we exclude NetworkPolicy from reconciliation - // and tolerate errors arising from the Tier not being created. - if !k8serrors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) - return reconcile.Result{}, err + // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. 
+ if variant == operatorv1.TigeraSecureEnterprise { + if !r.tierWatchReady.IsReady() { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - } else { - includeV3NetworkPolicy = true - - // The Tier has been created, which means that this controller's reconciliation should no longer be a dependency - // of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate - // License presence and sufficiency. - if networkPolicyRequiresEgressAccessControl(managementClusterConnection, log) { - license, err := utils.FetchLicenseKey(ctx, r.Client) - if err != nil { - if k8serrors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotFound, "License not found", err, reqLogger) + + // Ensure the allow-tigera tier exists, before rendering any network policies within it. + if err := r.Client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + // The creation of the Tier depends on this controller to reconcile its non-NetworkPolicy resources so that the + // License becomes available. Therefore, if we fail to query the Tier, we exclude NetworkPolicy from reconciliation + // and tolerate errors arising from the Tier not being created. + if !k8serrors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } else { + includeV3NetworkPolicy = true + + // The Tier has been created, which means that this controller's reconciliation should no longer be a dependency + // of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate + // License presence and sufficiency.
+ if networkPolicyRequiresEgressAccessControl(managementClusterConnection, log) { + license, err := utils.FetchLicenseKey(ctx, r.Client) + if err != nil { + if k8serrors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "License not found", err, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger) return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - if !utils.IsFeatureActive(license, common.EgressAccessControlFeature) { - r.status.SetDegraded(operatorv1.ResourceReadError, "Feature is not active - License does not support feature: egress-access-control", nil, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + if !utils.IsFeatureActive(license, common.EgressAccessControlFeature) { + r.status.SetDegraded(operatorv1.ResourceReadError, "Feature is not active - License does not support feature: egress-access-control", nil, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } } } } diff --git a/pkg/controller/clusterconnection/clusterconnection_controller_test.go b/pkg/controller/clusterconnection/clusterconnection_controller_test.go index c5e346ce54..9fc4e92c8f 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller_test.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller_test.go @@ -512,3 +512,138 @@ var _ = Describe("ManagementClusterConnection controller tests", func() { }) }) }) + +var _ = Describe("ManagementClusterConnection controller tests (OSS)", func() { + var c client.Client + var ctx context.Context + var cfg *operatorv1.ManagementClusterConnection + var r reconcile.Reconciler + var scheme *runtime.Scheme + var dpl *appsv1.Deployment + var mockStatus *status.MockStatus + ready := &utils.ReadyFlag{} + ready.MarkAsReady() + + BeforeEach(func() { + // Create a Kubernetes client. 
+ scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + err := operatorv1.SchemeBuilder.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + c = fake.NewClientBuilder().WithScheme(scheme).Build() + ctx = context.Background() + mockStatus = &status.MockStatus{} + mockStatus.On("Run").Return() + + mockStatus.On("AddDaemonsets", mock.Anything) + mockStatus.On("AddDeployments", mock.Anything) + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("AddCronJobs", mock.Anything) + mockStatus.On("ClearDegraded", mock.Anything) + mockStatus.On("SetDegraded", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") + mockStatus.On("SetMetaData", mock.Anything).Return() + + r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + dpl = &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: render.GuardianDeploymentName, + Namespace: render.GuardianNamespace, + }, + } + certificateManager, err := certificatemanager.Create(c, nil, dns.DefaultClusterDomain, common.OperatorNamespace(), certificatemanager.AllowCACreation()) + Expect(err).NotTo(HaveOccurred()) + Expect(c.Create(ctx, certificateManager.KeyPair().Secret(common.OperatorNamespace()))) // Persist the root-ca in the operator namespace. + secret, err := certificateManager.GetOrCreateKeyPair(c, render.GuardianSecretName, common.OperatorNamespace(), []string{"a"}) + Expect(err).NotTo(HaveOccurred()) + + pcSecret, err := certificateManager.GetOrCreateKeyPair(c, render.PacketCaptureServerCert, common.OperatorNamespace(), []string{"a"}) + Expect(err).NotTo(HaveOccurred()) + + promSecret, err := certificateManager.GetOrCreateKeyPair(c, monitor.PrometheusTLSSecretName, common.OperatorNamespace(), []string{"a"}) + Expect(err).NotTo(HaveOccurred()) + + queryServerSecret, err := certificateManager.GetOrCreateKeyPair(c, render.ProjectCalicoAPIServerTLSSecretName(operatorv1.TigeraSecureEnterprise), common.OperatorNamespace(), []string{"a"}) + Expect(err).NotTo(HaveOccurred()) + + err = c.Create(ctx, secret.Secret(common.OperatorNamespace())) + Expect(err).NotTo(HaveOccurred()) + err = c.Create(ctx, pcSecret.Secret(common.OperatorNamespace())) + Expect(err).NotTo(HaveOccurred()) + err = c.Create(ctx, promSecret.Secret(common.OperatorNamespace())) + Expect(err).NotTo(HaveOccurred()) + err = c.Create(ctx, queryServerSecret.Secret(common.OperatorNamespace())) + Expect(err).NotTo(HaveOccurred()) + + By("applying the required prerequisites") + // Create a ManagementClusterConnection in the k8s client. 
+ cfg = &operatorv1.ManagementClusterConnection{ + ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure", Generation: 3}, + Spec: operatorv1.ManagementClusterConnectionSpec{ + ManagementClusterAddr: "127.0.0.1:12345", + }, + } + err = c.Create(ctx, cfg) + Expect(err).NotTo(HaveOccurred()) + err = c.Create( + ctx, + &operatorv1.Installation{ + Spec: operatorv1.InstallationSpec{ + Variant: operatorv1.TigeraSecureEnterprise, + Registry: "some.registry.org/", + }, + ObjectMeta: metav1.ObjectMeta{Name: "default"}, + Status: operatorv1.InstallationStatus{ + Variant: operatorv1.Calico, + Computed: &operatorv1.InstallationSpec{ + Registry: "my-reg", + KubernetesProvider: operatorv1.ProviderNone, + }, + }, + }) + Expect(err).NotTo(HaveOccurred()) + }) + + Context("default config", func() { + It("should create a default ManagementClusterConnection", func() { + By("reconciling with the required prerequisites") + err := c.Get(ctx, client.ObjectKey{Name: render.GuardianDeploymentName, Namespace: render.GuardianNamespace}, dpl) + Expect(err).To(HaveOccurred()) + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ToNot(HaveOccurred()) + err = c.Get(ctx, client.ObjectKey{Name: render.GuardianDeploymentName, Namespace: render.GuardianNamespace}, dpl) + // Verifying that there is a deployment is enough for the purpose of this test. More detailed testing will be done + // in the render package. + Expect(err).NotTo(HaveOccurred()) + Expect(dpl.Labels["k8s-app"]).To(Equal(render.GuardianName)) + }) + }) + + Context("image reconciliation", func() { + It("should use builtin images", func() { + r = clusterconnection.NewReconcilerWithShims(c, scheme, mockStatus, operatorv1.ProviderNone, ready) + _, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + + d := appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: render.GuardianDeploymentName, + Namespace: render.GuardianNamespace, + }, + } + Expect(test.GetResource(c, &d)).To(BeNil()) + Expect(d.Spec.Template.Spec.Containers).To(HaveLen(1)) + dexC := test.GetContainer(d.Spec.Template.Spec.Containers, render.GuardianDeploymentName) + Expect(dexC).ToNot(BeNil()) + Expect(dexC.Image).To(Equal( + fmt.Sprintf("some.registry.org/%s:%s", + components.ComponentCalicoGuardian.Image, + components.ComponentCalicoGuardian.Version))) + }) + }) +}) diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go index 8e1da5cb50..414054b8c3 100644 --- a/pkg/controller/logcollector/logcollector_controller.go +++ b/pkg/controller/logcollector/logcollector_controller.go @@ -39,12 +39,14 @@ import ( operatorv1 "github.com/tigera/operator/api/v1" v1 "github.com/tigera/operator/api/v1" + crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/controller/certificatemanager" "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" "github.com/tigera/operator/pkg/controller/utils/imageset" + "github.com/tigera/operator/pkg/ptr" "github.com/tigera/operator/pkg/render" rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" @@ -61,13 +63,13 @@ var log = logf.Log.WithName("controller_logcollector") // Add creates a new LogCollector 
Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager, opts options.AddOptions) error { - if !opts.EnterpriseCRDExists { - // No need to start this controller. - return nil - } - licenseAPIReady := &utils.ReadyFlag{} - tierWatchReady := &utils.ReadyFlag{} + var licenseAPIReady *utils.ReadyFlag + var tierWatchReady *utils.ReadyFlag + if opts.EnterpriseCRDExists { + licenseAPIReady = &utils.ReadyFlag{} + tierWatchReady = &utils.ReadyFlag{} + } // create the reconciler reconciler := newReconciler(mgr, opts, licenseAPIReady, tierWatchReady) @@ -78,25 +80,26 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return fmt.Errorf("Failed to create logcollector-controller: %v", err) } - k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) - if err != nil { - log.Error(err, "Failed to establish a connection to k8s") - return err + if opts.EnterpriseCRDExists { + k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + log.Error(err, "Failed to establish a connection to k8s") + return err + } + go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) + go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, licenseAPIReady) + go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{ + {Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, + }) } - go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, licenseAPIReady) - go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) - go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{ - {Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, - }) - if opts.MultiTenant { if err = controller.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil { return fmt.Errorf("logcollector-controller failed to watch Tenant resource: %w", err) } } - return add(mgr, controller) + return add(mgr, controller, opts.EnterpriseCRDExists) } // newReconciler returns a new reconcile.Reconciler @@ -117,7 +120,7 @@ func newReconciler(mgr manager.Manager, opts options.AddOptions, licenseAPIReady } // add adds watches for resources that are available at startup -func add(mgr manager.Manager, c controller.Controller) error { +func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool) error { var err error // Watch for changes to primary resource LogCollector @@ -141,9 +144,7 @@ func add(mgr manager.Manager, c controller.Controller) error { } for _, secretName := range []string{ - render.ElasticsearchEksLogForwarderUserSecret, - relasticsearch.PublicCertSecret, render.S3FluentdSecretName, render.EksLogForwarderSecret, - render.SplunkFluentdTokenSecretName, render.SplunkFluentdCertificateSecretName, monitor.PrometheusTLSSecretName, + relasticsearch.PublicCertSecret, monitor.PrometheusTLSSecretName, render.FluentdPrometheusTLSSecretName, render.TigeraLinseedSecret, render.VoltronLinseedPublicCert, } { if err = utils.AddSecretsWatch(c, secretName, common.OperatorNamespace()); err != nil { @@ -151,6 +152,18 @@ func add(mgr manager.Manager, c controller.Controller) error { } } + if enterpriseCRDExists { + for _, secretName := range []string{ + render.ElasticsearchEksLogForwarderUserSecret, + render.S3FluentdSecretName, render.EksLogForwarderSecret, + 
render.SplunkFluentdTokenSecretName, render.SplunkFluentdCertificateSecretName, + } { + if err = utils.AddSecretsWatch(c, secretName, common.OperatorNamespace()); err != nil { + return fmt.Errorf("log-collector-controller failed to watch the Secret resource (%s): %v", secretName, err) + } + } + } + for _, configMapName := range []string{render.FluentdFilterConfigMapName, relasticsearch.ClusterConfigConfigMapName} { if err = utils.AddConfigMapWatch(c, configMapName, common.OperatorNamespace(), &handler.EnqueueRequestForObject{}); err != nil { return fmt.Errorf("logcollector-controller failed to watch ConfigMap %s: %v", configMapName, err) @@ -256,6 +269,8 @@ func fillDefaults(instance *operatorv1.LogCollector) []string { func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) reqLogger.Info("Reconciling LogCollector") + var license v3.LicenseKey + var err error // Fetch the LogCollector instance instance, err := GetLogCollector(ctx, r.client) if err != nil { @@ -289,16 +304,22 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } } - // Default fields on the LogCollector instance if needed. - preDefaultPatchFrom := client.MergeFrom(instance.DeepCopy()) - modifiedFields := fillDefaults(instance) - if len(modifiedFields) > 0 { - if err = r.client.Patch(ctx, instance, preDefaultPatchFrom); err != nil { - r.status.SetDegraded(operatorv1.ResourcePatchError, fmt.Sprintf("Failed to set defaults for LogCollector fields: [%s]", - strings.Join(modifiedFields, ", "), - ), err, reqLogger) + // Fetch the Installation instance. We need this for a few reasons. + // - We need to make sure it has successfully completed installation. + // - We need to get the registry information from its spec. + variant, installation, err := utils.GetInstallation(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) return reconcile.Result{}, err } + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying installation", err, reqLogger) + return reconcile.Result{}, err + } + + if variant != operatorv1.TigeraSecureEnterprise && (instance.Spec.AdditionalStores != nil || instance.Spec.CollectProcessPath != nil) { + r.status.SetDegraded(operatorv1.InvalidConfigurationError, "Additional log sources can be configured only in Calico Enterprise.", nil, reqLogger) + return reconcile.Result{}, fmt.Errorf("Additional log sources can be configured only in Calico Enterprise.") } if !utils.IsAPIServerReady(r.client, reqLogger) { @@ -306,60 +327,51 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, nil } - // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. - if !r.tierWatchReady.IsReady() { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - - // Ensure the allow-tigera tier exists, before rendering any network policies within it. 
- if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created", err, reqLogger) + if variant == operatorv1.TigeraSecureEnterprise { + // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. + if !r.tierWatchReady.IsReady() { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } else { - log.Error(err, "Error querying allow-tigera tier") - r.status.SetDegraded(operatorv1.ResourceNotReady, "Error querying allow-tigera tier", err, reqLogger) - return reconcile.Result{}, err } - } - if !r.licenseAPIReady.IsReady() { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for LicenseKeyAPI to be ready", nil, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } + // Ensure the allow-tigera tier exists, before rendering any network policies within it. + if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for allow-tigera tier to be created", err, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } else { + log.Error(err, "Error querying allow-tigera tier") + r.status.SetDegraded(operatorv1.ResourceNotReady, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } - license, err := utils.FetchLicenseKey(ctx, r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotFound, "License not found", err, reqLogger) + // Default fields on the LogCollector instance if needed. + preDefaultPatchFrom := client.MergeFrom(instance.DeepCopy()) + modifiedFields := fillDefaults(instance) + if len(modifiedFields) > 0 { + if err = r.client.Patch(ctx, instance, preDefaultPatchFrom); err != nil { + r.status.SetDegraded(operatorv1.ResourcePatchError, fmt.Sprintf("Failed to set defaults for LogCollector fields: [%s]", + strings.Join(modifiedFields, ", "), + ), err, reqLogger) + return reconcile.Result{}, err + } } - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - // Fetch the Installation instance. We need this for a few reasons. - // - We need to make sure it has successfully completed installation. - // - We need to get the registry information from its spec. 
- variant, installation, err := utils.GetInstallation(ctx, r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger) - return reconcile.Result{}, err + if !r.licenseAPIReady.IsReady() { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for LicenseKeyAPI to be ready", nil, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying installation", err, reqLogger) - return reconcile.Result{}, err - } - esClusterConfig, err := utils.GetElasticsearchClusterConfig(ctx, r.client) - if err != nil { - if errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) - return reconcile.Result{}, nil + license, err = utils.FetchLicenseKey(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotFound, "License not found", err, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) - return reconcile.Result{}, err } pullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client) @@ -367,6 +379,18 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving pull secrets", err, reqLogger) return reconcile.Result{}, err } + var esClusterConfig *relasticsearch.ClusterConfig + if variant == operatorv1.TigeraSecureEnterprise { + esClusterConfig, err = utils.GetElasticsearchClusterConfig(ctx, r.client) + if err != nil { + if errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Elasticsearch cluster configuration is not available, waiting for it to become available", err, reqLogger) + return reconcile.Result{}, nil + } + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) + return reconcile.Result{}, err + } + } // Try to grab the ManagementClusterConnection CR because we need it for network policy rendering, // as well as validation with respect to Syslog.logTypes. 
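[Illustrative sketch, not part of the patch.] Further down in this file's diff, the non-Enterprise branch calls utils.PatchFelixConfiguration and toggles five flow-log booleans with an identical nil-or-false check per field. That repetition could be collapsed into a small helper; the helper name and placement are hypothetical, and only ptr.BoolToPtr and the field names from the felixconfig.go hunk above are assumed:

    package logcollector

    import (
    	crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1"
    	"github.com/tigera/operator/pkg/ptr"
    )

    // setFlowLogDefaults flips each flow-log field that is unset or false to
    // true and reports whether a patch is required; it is equivalent to the
    // five repeated checks in the PatchFelixConfiguration closure below.
    func setFlowLogDefaults(fc *crdv1.FelixConfiguration) bool {
    	patchRequired := false
    	for _, field := range []**bool{
    		&fc.Spec.FlowLogsFileEnabled,
    		&fc.Spec.FlowLogsFileIncludeService,
    		&fc.Spec.FlowLogsFileIncludePolicies,
    		&fc.Spec.FlowLogsEnableHostEndpoint,
    		&fc.Spec.FlowLogsEnableNetworkSets,
    	} {
    		if *field == nil || !**field {
    			*field = ptr.BoolToPtr(true)
    			patchRequired = true
    		}
    	}
    	return patchRequired
    }

The closure passed to utils.PatchFelixConfiguration could then reduce to return setFlowLogDefaults(fc).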
@@ -381,10 +405,13 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } managedCluster := managementClusterConnection != nil - managementCluster, err := utils.GetManagementCluster(ctx, r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) - return reconcile.Result{}, err + var managementCluster *operatorv1.ManagementCluster + if variant == operatorv1.TigeraSecureEnterprise { + managementCluster, err = utils.GetManagementCluster(ctx, r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) + return reconcile.Result{}, err + } } certificateManager, err := certificatemanager.Create(r.client, installation, r.clusterDomain, common.OperatorNamespace()) @@ -400,13 +427,16 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, err } - prometheusCertificate, err := certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get certificate", err, reqLogger) - return reconcile.Result{}, err - } else if prometheusCertificate == nil { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Prometheus secrets are not available yet, waiting until they become available", nil, reqLogger) - return reconcile.Result{RequeueAfter: 5 * time.Second}, nil + var prometheusCertificate certificatemanagement.CertificateInterface + if variant == operatorv1.TigeraSecureEnterprise { + prometheusCertificate, err = certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get certificate", err, reqLogger) + return reconcile.Result{}, err + } else if prometheusCertificate == nil { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Prometheus secrets are not available yet, waiting until they become available", nil, reqLogger) + return reconcile.Result{RequeueAfter: 5 * time.Second}, nil + } } // Determine whether or not this is a multi-tenant management cluster. @@ -453,8 +483,12 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, nil } + trustedBundleCerts := []certificatemanagement.CertificateInterface{linseedCertificate} + if variant == operatorv1.TigeraSecureEnterprise { + trustedBundleCerts = append(trustedBundleCerts, prometheusCertificate) + } // Fluentd needs to mount system certificates in the case where Splunk, Syslog or AWS are used. - trustedBundle, err := certificateManager.CreateTrustedBundleWithSystemRootCertificates(prometheusCertificate, linseedCertificate) + trustedBundle, err := certificateManager.CreateTrustedBundleWithSystemRootCertificates(trustedBundleCerts...) 
if err != nil { r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create tigera-ca-bundle configmap", err, reqLogger) return reconcile.Result{}, err @@ -462,72 +496,74 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile certificateManager.AddToStatusManager(r.status, render.LogCollectorNamespace) - exportLogs := utils.IsFeatureActive(license, common.ExportLogsFeature) - if !exportLogs && instance.Spec.AdditionalStores != nil { - r.status.SetDegraded(operatorv1.ResourceValidationError, "Feature is not active - License does not support feature: export-logs", nil, reqLogger) - return reconcile.Result{}, err - } - var s3Credential *render.S3Credential - if instance.Spec.AdditionalStores != nil { - if instance.Spec.AdditionalStores.S3 != nil { - s3Credential, err = getS3Credential(r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceValidationError, "Error with S3 credential secret", err, reqLogger) - return reconcile.Result{}, err - } - if s3Credential == nil { - r.status.SetDegraded(operatorv1.ResourceNotFound, "S3 credential secret does not exist", nil, reqLogger) - return reconcile.Result{}, nil - } + var splunkCredential *render.SplunkCredential + var useSyslogCertificate bool + if variant == operatorv1.TigeraSecureEnterprise { + exportLogs := utils.IsFeatureActive(license, common.ExportLogsFeature) + if !exportLogs && instance.Spec.AdditionalStores != nil { + r.status.SetDegraded(operatorv1.ResourceValidationError, "Feature is not active - License does not support feature: export-logs", nil, reqLogger) + return reconcile.Result{}, err } - } - var splunkCredential *render.SplunkCredential - if instance.Spec.AdditionalStores != nil { - if instance.Spec.AdditionalStores.Splunk != nil { - splunkCredential, err = getSplunkCredential(r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceValidationError, "Error with Splunk credential secret", err, reqLogger) - return reconcile.Result{}, err - } - if splunkCredential == nil { - r.status.SetDegraded(operatorv1.ResourceNotFound, "Splunk credential secret does not exist", nil, reqLogger) - return reconcile.Result{}, nil + if instance.Spec.AdditionalStores != nil { + if instance.Spec.AdditionalStores.S3 != nil { + s3Credential, err = getS3Credential(r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceValidationError, "Error with S3 credential secret", err, reqLogger) + return reconcile.Result{}, err + } + if s3Credential == nil { + r.status.SetDegraded(operatorv1.ResourceNotFound, "S3 credential secret does not exist", nil, reqLogger) + return reconcile.Result{}, nil + } } } - } - var useSyslogCertificate bool - if instance.Spec.AdditionalStores != nil { - if instance.Spec.AdditionalStores.Syslog != nil && instance.Spec.AdditionalStores.Syslog.Encryption == v1.EncryptionTLS { - syslogCert, err := getSysLogCertificate(r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error loading Syslog certificate", err, reqLogger) - return reconcile.Result{}, err - } - if syslogCert != nil { - useSyslogCertificate = true - trustedBundle.AddCertificates(syslogCert) + if instance.Spec.AdditionalStores != nil { + if instance.Spec.AdditionalStores.Splunk != nil { + splunkCredential, err = getSplunkCredential(r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceValidationError, "Error with Splunk credential secret", err, reqLogger) + return reconcile.Result{}, err + } + if splunkCredential == nil { + 
r.status.SetDegraded(operatorv1.ResourceNotFound, "Splunk credential secret does not exist", nil, reqLogger) + return reconcile.Result{}, nil + } } } - } - if instance.Spec.AdditionalStores != nil { - if instance.Spec.AdditionalStores.Syslog != nil { - syslog := instance.Spec.AdditionalStores.Syslog + if instance.Spec.AdditionalStores != nil { + if instance.Spec.AdditionalStores.Syslog != nil && instance.Spec.AdditionalStores.Syslog.Encryption == v1.EncryptionTLS { + syslogCert, err := getSysLogCertificate(r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error loading Syslog certificate", err, reqLogger) + return reconcile.Result{}, err + } + if syslogCert != nil { + useSyslogCertificate = true + trustedBundle.AddCertificates(syslogCert) + } + } + } - // If the user set Syslog.logTypes, we need to ensure that they did not include - // the v1.SyslogLogIDSEvents option if this is a managed cluster (i.e. - // ManagementClusterConnection CR is present). This is because IDS events - // are only forwarded within a non-managed cluster (where LogStorage is present). - if syslog.LogTypes != nil { - if err == nil && managedCluster { - for _, l := range syslog.LogTypes { - // Set status to degraded to warn user and let them fix the issue themselves. - if l == v1.SyslogLogIDSEvents { - r.status.SetDegraded(operatorv1.ResourceValidationError, "IDSEvents option is not supported for Syslog config in a managed cluster", nil, reqLogger) - return reconcile.Result{}, err + if instance.Spec.AdditionalStores != nil { + if instance.Spec.AdditionalStores.Syslog != nil { + syslog := instance.Spec.AdditionalStores.Syslog + + // If the user set Syslog.logTypes, we need to ensure that they did not include + // the v1.SyslogLogIDSEvents option if this is a managed cluster (i.e. + // ManagementClusterConnection CR is present). This is because IDS events + // are only forwarded within a non-managed cluster (where LogStorage is present). + if syslog.LogTypes != nil { + if err == nil && managedCluster { + for _, l := range syslog.LogTypes { + // Set status to degraded to warn user and let them fix the issue themselves. 
+ if l == v1.SyslogLogIDSEvents { + r.status.SetDegraded(operatorv1.ResourceValidationError, "IDSEvents option is not supported for Syslog config in a managed cluster", nil, reqLogger) + return reconcile.Result{}, err + } } } } @@ -542,21 +578,58 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } var eksConfig *render.EksCloudwatchLogConfig - if installation.KubernetesProvider == operatorv1.ProviderEKS { - log.Info("Managed kubernetes EKS found, getting necessary credentials and config") - if instance.Spec.AdditionalSources != nil { - if instance.Spec.AdditionalSources.EksCloudwatchLog != nil { - eksConfig, err = getEksCloudwatchLogConfig(r.client, - instance.Spec.AdditionalSources.EksCloudwatchLog.FetchInterval, - instance.Spec.AdditionalSources.EksCloudwatchLog.Region, - instance.Spec.AdditionalSources.EksCloudwatchLog.GroupName, - instance.Spec.AdditionalSources.EksCloudwatchLog.StreamPrefix) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving EKS Cloudwatch Logs configuration", err, reqLogger) - return reconcile.Result{}, err + if variant == operatorv1.TigeraSecureEnterprise { + if installation.KubernetesProvider == operatorv1.ProviderEKS { + log.Info("Managed kubernetes EKS found, getting necessary credentials and config") + if instance.Spec.AdditionalSources != nil { + if instance.Spec.AdditionalSources.EksCloudwatchLog != nil { + eksConfig, err = getEksCloudwatchLogConfig(r.client, + instance.Spec.AdditionalSources.EksCloudwatchLog.FetchInterval, + instance.Spec.AdditionalSources.EksCloudwatchLog.Region, + instance.Spec.AdditionalSources.EksCloudwatchLog.GroupName, + instance.Spec.AdditionalSources.EksCloudwatchLog.StreamPrefix) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving EKS Cloudwatch Logs configuration", err, reqLogger) + return reconcile.Result{}, err + } } } } + } else { + // patch and get the felix configuration + _, err = utils.PatchFelixConfiguration(ctx, r.client, func(fc *crdv1.FelixConfiguration) bool { + patchRequired := false + if fc.Spec.FlowLogsFileEnabled == nil || !(*fc.Spec.FlowLogsFileEnabled) { + fc.Spec.FlowLogsFileEnabled = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsFileIncludeService == nil || !(*fc.Spec.FlowLogsFileIncludeService) { + fc.Spec.FlowLogsFileIncludeService = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsFileIncludePolicies == nil || !(*fc.Spec.FlowLogsFileIncludePolicies) { + fc.Spec.FlowLogsFileIncludePolicies = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsEnableHostEndpoint == nil || !(*fc.Spec.FlowLogsEnableHostEndpoint) { + fc.Spec.FlowLogsEnableHostEndpoint = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsEnableNetworkSets == nil || !(*fc.Spec.FlowLogsEnableNetworkSets) { + fc.Spec.FlowLogsEnableNetworkSets = ptr.BoolToPtr(true) + patchRequired = true + } + return patchRequired // proceed with this patch + }) + if err != nil { + reqLogger.Error(err, "Error patching felix configuration") + r.status.SetDegraded(operatorv1.ResourcePatchError, "Error patching felix configuration", err, reqLogger) + return reconcile.Result{}, err + } } // Create a component handler to manage the rendered component. @@ -607,42 +680,44 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } // Render a fluentd component for Windows if the cluster has Windows nodes. 
- hasWindowsNodes, err := hasWindowsNodes(r.client) - if err != nil { - return reconcile.Result{}, err - } - - if hasWindowsNodes { - fluentdCfg = &render.FluentdConfiguration{ - LogCollector: instance, - ESClusterConfig: esClusterConfig, - S3Credential: s3Credential, - SplkCredential: splunkCredential, - Filters: filters, - EKSConfig: eksConfig, - PullSecrets: pullSecrets, - Installation: installation, - ClusterDomain: r.clusterDomain, - OSType: rmeta.OSTypeWindows, - TrustedBundle: trustedBundle, - ManagedCluster: managedCluster, - UsePSP: r.usePSP, - UseSyslogCertificate: useSyslogCertificate, - FluentdKeyPair: fluentdKeyPair, - } - comp = render.Fluentd(fluentdCfg) - - if err = imageset.ApplyImageSet(ctx, r.client, variant, comp); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + if variant == operatorv1.TigeraSecureEnterprise { + hasWindowsNodes, err := hasWindowsNodes(r.client) + if err != nil { return reconcile.Result{}, err } - // Create a component handler to manage the rendered component. - handler = utils.NewComponentHandler(log, r.client, r.scheme, instance) + if hasWindowsNodes { + fluentdCfg = &render.FluentdConfiguration{ + LogCollector: instance, + ESClusterConfig: esClusterConfig, + S3Credential: s3Credential, + SplkCredential: splunkCredential, + Filters: filters, + EKSConfig: eksConfig, + PullSecrets: pullSecrets, + Installation: installation, + ClusterDomain: r.clusterDomain, + OSType: rmeta.OSTypeWindows, + TrustedBundle: trustedBundle, + ManagedCluster: managedCluster, + UsePSP: r.usePSP, + UseSyslogCertificate: useSyslogCertificate, + FluentdKeyPair: fluentdKeyPair, + } + comp = render.Fluentd(fluentdCfg) + + if err = imageset.ApplyImageSet(ctx, r.client, variant, comp); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error with images from ImageSet", err, reqLogger) + return reconcile.Result{}, err + } - if err := handler.CreateOrUpdateOrDelete(ctx, comp, r.status); err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) - return reconcile.Result{}, err + // Create a component handler to manage the rendered component. 
+ handler = utils.NewComponentHandler(log, r.client, r.scheme, instance) + + if err := handler.CreateOrUpdateOrDelete(ctx, comp, r.status); err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger) + return reconcile.Result{}, err + } } } diff --git a/pkg/controller/logcollector/logcollector_controller_test.go b/pkg/controller/logcollector/logcollector_controller_test.go index 4e36d41de0..1ef8426def 100644 --- a/pkg/controller/logcollector/logcollector_controller_test.go +++ b/pkg/controller/logcollector/logcollector_controller_test.go @@ -37,6 +37,7 @@ import ( v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" operatorv1 "github.com/tigera/operator/api/v1" "github.com/tigera/operator/pkg/apis" + crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/components" "github.com/tigera/operator/pkg/controller/certificatemanager" @@ -179,8 +180,8 @@ var _ = Describe("LogCollector controller tests", func() { Expect(node).ToNot(BeNil()) Expect(node.Image).To(Equal( fmt.Sprintf("some.registry.org/%s:%s", - components.ComponentFluentd.Image, - components.ComponentFluentd.Version))) + components.ComponentTigeraFluentd.Image, + components.ComponentTigeraFluentd.Version))) }) It("should use images from imageset", func() { Expect(c.Create(ctx, &operatorv1.ImageSet{ @@ -218,7 +219,7 @@ var _ = Describe("LogCollector controller tests", func() { Expect(node).ToNot(BeNil()) Expect(node.Image).To(Equal( fmt.Sprintf("some.registry.org/%s@%s", - components.ComponentFluentd.Image, + components.ComponentTigeraFluentd.Image, "sha256:fluentdhash"))) ds.Name = "fluentd-node-windows" @@ -228,7 +229,7 @@ var _ = Describe("LogCollector controller tests", func() { Expect(node).ToNot(BeNil()) Expect(node.Image).To(Equal( fmt.Sprintf("some.registry.org/%s@%s", - components.ComponentFluentdWindows.Image, + components.ComponentTigeraFluentdWindows.Image, "sha256:fluentdwindowshash"))) }) @@ -795,3 +796,170 @@ var _ = Describe("LogCollector controller tests", func() { }) }) }) + +var _ = Describe("LogCollector controller tests (OSS)", func() { + var c client.Client + var ctx context.Context + var r ReconcileLogCollector + var scheme *runtime.Scheme + var mockStatus *status.MockStatus + + BeforeEach(func() { + // The scheme contains all objects that should be known to the fake client when the test runs. + scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(batchv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(operatorv1.SchemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + + // Create a client that will have a CRUD interface for k8s objects. + c = fake.NewClientBuilder().WithScheme(scheme).Build() + ctx = context.Background() + + // Create a mock status object we can use throughout the test. 
+ mockStatus = &status.MockStatus{}
+ mockStatus.On("AddDaemonsets", mock.Anything).Return()
+ mockStatus.On("AddDeployments", mock.Anything).Return()
+ mockStatus.On("AddStatefulSets", mock.Anything).Return()
+ mockStatus.On("AddCronJobs", mock.Anything)
+ mockStatus.On("RemoveCertificateSigningRequests", mock.Anything).Return()
+ mockStatus.On("AddCertificateSigningRequests", mock.Anything).Return()
+ mockStatus.On("IsAvailable").Return(true)
+ mockStatus.On("OnCRFound").Return()
+ mockStatus.On("ClearDegraded")
+ mockStatus.On("SetDegraded", operatorv1.InvalidConfigurationError, "Additional log sources can be configured only in calico enterprise.", mock.Anything, mock.Anything).Return().Maybe()
+ mockStatus.On("ReadyToMonitor")
+ mockStatus.On("SetMetaData", mock.Anything).Return()
+
+ // Create the reconciler we use throughout the test to run the reconcile loops.
+ // As the parameters in the client change, we expect the outcomes of the reconcile loops to change.
+ r = ReconcileLogCollector{
+ client: c,
+ scheme: scheme,
+ provider: operatorv1.ProviderNone,
+ status: mockStatus,
+ }
+
+ // We start off with a 'standard' installation, with nothing special.
+ Expect(c.Create(
+ ctx,
+ &operatorv1.Installation{
+ ObjectMeta: metav1.ObjectMeta{Name: "default"},
+ Spec: operatorv1.InstallationSpec{
+ Variant: operatorv1.Calico,
+ Registry: "some.registry.org/",
+ },
+ Status: operatorv1.InstallationStatus{
+ Variant: operatorv1.Calico,
+ Computed: &operatorv1.InstallationSpec{
+ Registry: "my-reg",
+ // The test is provider agnostic.
+ KubernetesProvider: operatorv1.ProviderNone,
+ },
+ },
+ })).NotTo(HaveOccurred())
+
+ // Create the resources LogCollector depends on.
+ Expect(c.Create(ctx, &operatorv1.APIServer{
+ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"},
+ Status: operatorv1.APIServerStatus{State: operatorv1.TigeraStatusReady},
+ })).NotTo(HaveOccurred())
+
+ Expect(c.Create(ctx, relasticsearch.NewClusterConfig("cluster", 1, 1, 1).ConfigMap())).NotTo(HaveOccurred())
+
+ certificateManager, err := certificatemanager.Create(c, nil, "", common.OperatorNamespace(), certificatemanager.AllowCACreation())
+ Expect(err).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, certificateManager.KeyPair().Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) // Persist the root-ca in the operator namespace.
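+ // The secrets created below stand in for what other controllers would
+ // normally provision in the operator namespace; this reconciler only
+ // needs to be able to read them.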
+
+ kibanaTLS, err := certificateManager.GetOrCreateKeyPair(c, relasticsearch.PublicCertSecret, common.OperatorNamespace(), []string{relasticsearch.PublicCertSecret})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, kibanaTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: render.ElasticsearchEksLogForwarderUserSecret,
+ Namespace: "tigera-operator",
+ },
+ })).NotTo(HaveOccurred())
+
+ prometheusTLS, err := certificateManager.GetOrCreateKeyPair(c, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace(), []string{monitor.PrometheusTLSSecretName})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, prometheusTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred())
+
+ linseedTLS, err := certificateManager.GetOrCreateKeyPair(c, render.TigeraLinseedSecret, common.OperatorNamespace(), []string{render.LinseedServiceName})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, linseedTLS.Secret(common.OperatorNamespace()))).NotTo(HaveOccurred())
+
+ Expect(c.Create(ctx, &crdv1.FelixConfiguration{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "default",
+ },
+ })).NotTo(HaveOccurred())
+
+ // Apply the logcollector CR to the fake cluster.
+ Expect(c.Create(ctx, &operatorv1.LogCollector{
+ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"},
+ })).NotTo(HaveOccurred())
+ })
+
+ Context("image reconciliation", func() {
+ It("should use builtin images", func() {
+ _, err := r.Reconcile(ctx, reconcile.Request{})
+ Expect(err).ShouldNot(HaveOccurred())
+
+ ds := appsv1.DaemonSet{
+ TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"},
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "fluentd-node",
+ Namespace: render.LogCollectorNamespace,
+ },
+ }
+ Expect(test.GetResource(c, &ds)).To(BeNil())
+ Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1))
+ node := ds.Spec.Template.Spec.Containers[0]
+ Expect(node).ToNot(BeNil())
+ Expect(node.Image).To(Equal(
+ fmt.Sprintf("some.registry.org/%s:%s",
+ components.ComponentCalicoFluentd.Image,
+ components.ComponentCalicoFluentd.Version)))
+ })
+ })
+
+ Context("flow logs felix configuration", func() {
+ It("should set the proper flow logs felix configs", func() {
+ _, err := r.Reconcile(ctx, reconcile.Request{})
+ Expect(err).ShouldNot(HaveOccurred())
+ fc := &crdv1.FelixConfiguration{}
+ Expect(c.Get(ctx, types.NamespacedName{Name: "default", Namespace: ""}, fc)).NotTo(HaveOccurred())
+ Expect(*fc.Spec.FlowLogsFileEnabled).Should(BeTrue())
+ Expect(*fc.Spec.FlowLogsFileIncludePolicies).Should(BeTrue())
+ Expect(*fc.Spec.FlowLogsFileIncludeService).Should(BeTrue())
+ Expect(*fc.Spec.FlowLogsEnableHostEndpoint).Should(BeTrue())
+ Expect(*fc.Spec.FlowLogsEnableNetworkSets).Should(BeTrue())
+ })
+ })
+
+ Context("additional log stores", func() {
+ BeforeEach(func() {
+ Expect(c.Delete(ctx, &operatorv1.LogCollector{
+ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"},
+ })).NotTo(HaveOccurred())
+ Expect(c.Create(ctx, &operatorv1.LogCollector{
+ ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"},
+ Spec: operatorv1.LogCollectorSpec{
+ AdditionalStores: &operatorv1.AdditionalLogStoreSpec{
+ S3: &operatorv1.S3StoreSpec{
+ BucketName: "s3Bucket",
+ Region: "s3Region",
+ BucketPath: "s3Path",
+ },
+ },
+ },
+ })).NotTo(HaveOccurred())
+ })
+ It("should return an error when additional log stores are configured", func() {
+ _, err := r.Reconcile(ctx, reconcile.Request{})
+
Expect(err).Should(HaveOccurred()) + }) + }) +}) diff --git a/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go b/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go index a744215ab3..f0a17676b1 100644 --- a/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go +++ b/pkg/controller/logstorage/managedcluster/managed_cluster_controller.go @@ -51,10 +51,6 @@ type LogStorageManagedClusterController struct { } func Add(mgr manager.Manager, opts options.AddOptions) error { - if !opts.EnterpriseCRDExists { - return nil - } - // Create the reconciler r := &LogStorageManagedClusterController{ client: mgr.GetClient(), @@ -70,26 +66,27 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { } // Configure watches for operator.tigera.io APIs this controller cares about. - if err = c.Watch(&source.Kind{Type: &operatorv1.LogStorage{}}, &handler.EnqueueRequestForObject{}); err != nil { - return fmt.Errorf("log-storage-managedcluster-controller failed to watch LogStorage resource: %w", err) + if opts.EnterpriseCRDExists { + if err = c.Watch(&source.Kind{Type: &operatorv1.LogStorage{}}, &handler.EnqueueRequestForObject{}); err != nil { + return fmt.Errorf("log-storage-managedcluster-controller failed to watch LogStorage resource: %w", err) + } + if err = c.Watch(&source.Kind{Type: &operatorv1.ManagementCluster{}}, &handler.EnqueueRequestForObject{}); err != nil { + return fmt.Errorf("log-storage-managedcluster-controller failed to watch ManagementCluster resource: %w", err) + } + if err = utils.AddSecretsWatch(c, relasticsearch.PublicCertSecret, common.OperatorNamespace()); err != nil { + return fmt.Errorf("log-storage-managedcluster-controller failed to watch Secret resource: %w", err) + } + if err = utils.AddSecretsWatch(c, relasticsearch.PublicCertSecret, render.ElasticsearchNamespace); err != nil { + return fmt.Errorf("log-storage-managedcluster-controller failed to watch Secret resource: %w", err) + } } if err = utils.AddNetworkWatch(c); err != nil { return fmt.Errorf("log-storage-managedcluster-controller failed to watch Network resource: %w", err) } - if err = c.Watch(&source.Kind{Type: &operatorv1.ManagementCluster{}}, &handler.EnqueueRequestForObject{}); err != nil { - return fmt.Errorf("log-storage-managedcluster-controller failed to watch ManagementCluster resource: %w", err) - } if err = c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, &handler.EnqueueRequestForObject{}); err != nil { return fmt.Errorf("log-storage-managedcluster-controller failed to watch ManagementClusterConnection resource: %w", err) } - if err = utils.AddSecretsWatch(c, relasticsearch.PublicCertSecret, common.OperatorNamespace()); err != nil { - return fmt.Errorf("log-storage-managedcluster-controller failed to watch Secret resource: %w", err) - } - if err = utils.AddSecretsWatch(c, relasticsearch.PublicCertSecret, render.ElasticsearchNamespace); err != nil { - return fmt.Errorf("log-storage-managedcluster-controller failed to watch Secret resource: %w", err) - } - return nil } @@ -115,26 +112,24 @@ func (r *LogStorageManagedClusterController) Reconcile(ctx context.Context, requ } return reconcile.Result{}, err } - if variant != operatorv1.TigeraSecureEnterprise { - return reconcile.Result{}, nil - } - - managementCluster, err := utils.GetManagementCluster(ctx, r.client) - if err != nil { - return reconcile.Result{}, err - } - if managementCluster != nil { - // ManagementCluster is not supported on a managed cluster. Return an error. 
- return reconcile.Result{}, fmt.Errorf("ManagementCluster is not supported on a managed cluster") - } + if variant == operatorv1.TigeraSecureEnterprise { + managementCluster, err := utils.GetManagementCluster(ctx, r.client) + if err != nil { + return reconcile.Result{}, err + } + if managementCluster != nil { + // ManagementCluster is not supported on a managed cluster. Return an error. + return reconcile.Result{}, fmt.Errorf("ManagementCluster is not supported on a managed cluster") + } - exists, err := utils.LogStorageExists(ctx, r.client) - if err != nil { - return reconcile.Result{}, err - } - if exists { - // LogStorage is not supported on a managed cluster. Return an error. - return reconcile.Result{}, fmt.Errorf("LogStorage is not supported on a managed cluster") + exists, err := utils.LogStorageExists(ctx, r.client) + if err != nil { + return reconcile.Result{}, err + } + if exists { + // LogStorage is not supported on a managed cluster. Return an error. + return reconcile.Result{}, fmt.Errorf("LogStorage is not supported on a managed cluster") + } } // Create the component and install it. diff --git a/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go b/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go index 776fb3dda9..7c851cee2b 100644 --- a/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go +++ b/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go @@ -143,4 +143,63 @@ var _ = Describe("LogStorageManagedCluster controller", func() { }) }) }) + +}) + +var _ = Describe("LogStorageManagedCluster controller OSS", func() { + var ( + cli client.Client + scheme *runtime.Scheme + ctx context.Context + install *operatorv1.Installation + ) + + BeforeEach(func() { + scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(storagev1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(batchv1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + Expect(admissionv1beta1.SchemeBuilder.AddToScheme(scheme)).ShouldNot(HaveOccurred()) + + ctx = context.Background() + cli = fake.NewClientBuilder().WithScheme(scheme).Build() + + var replicas int32 = 2 + install = &operatorv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Status: operatorv1.InstallationStatus{ + Variant: operatorv1.TigeraSecureEnterprise, + Computed: &operatorv1.InstallationSpec{}, + }, + Spec: operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + Variant: operatorv1.Calico, + }, + } + Expect(cli.Create(ctx, install)).ShouldNot(HaveOccurred()) + Expect(cli.Create(ctx, &operatorv1.ManagementClusterConnection{ObjectMeta: metav1.ObjectMeta{Name: utils.DefaultTSEEInstanceKey.Name}})).NotTo(HaveOccurred()) + }) + + Context("ExternalService is correctly setup", func() { + DescribeTable("tests that the ExternalService is setup with the default service name", func(clusterDomain, expectedSvcName string) { + r, err := NewReconcilerWithShims(cli, scheme, operatorv1.ProviderNone, clusterDomain) + Expect(err).ShouldNot(HaveOccurred()) + _, err = r.Reconcile(ctx, reconcile.Request{}) + Expect(err).ShouldNot(HaveOccurred()) + svc := &corev1.Service{} + Expect( + cli.Get(ctx, client.ObjectKey{Name: render.LinseedServiceName, Namespace: render.ElasticsearchNamespace}, svc), + 
).ShouldNot(HaveOccurred())
+
+ Expect(svc.Spec.ExternalName).Should(Equal(expectedSvcName))
+ Expect(svc.Spec.Type).Should(Equal(corev1.ServiceTypeExternalName))
+ },
+ Entry("default cluster domain", dns.DefaultClusterDomain, "tigera-guardian.tigera-guardian.svc.cluster.local"),
+ Entry("custom cluster domain", "custom-domain.internal", "tigera-guardian.tigera-guardian.svc.custom-domain.internal"),
+ )
+ })
})
diff --git a/pkg/controller/utils/discovery.go b/pkg/controller/utils/discovery.go
index aad4875670..d2d9a0e6be 100644
--- a/pkg/controller/utils/discovery.go
+++ b/pkg/controller/utils/discovery.go
@@ -45,8 +45,6 @@ func RequiresTigeraSecure(cfg *rest.Config) (bool, error) {
 }
 for _, r := range resources.APIResources {
 switch r.Kind {
- case "LogCollector":
- fallthrough
 case "LogStorage":
 fallthrough
 case "AmazonCloudIntegration":
@@ -62,8 +60,6 @@ func RequiresTigeraSecure(cfg *rest.Config) (bool, error) {
 case "ManagementCluster":
 fallthrough
 case "EgressGateway":
- fallthrough
- case "ManagementClusterConnection":
 return true, nil
 }
 }
diff --git a/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml b/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml
index e0e83d0df7..d2b5fc4d21 100644
--- a/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml
+++ b/pkg/crds/calico/crd.projectcalico.org_globalnetworkpolicies.yaml
@@ -799,6 +799,19 @@ spec:
 with identical order will be applied in alphanumerical order based
 on the Policy "Name".
 type: number
+ performanceHints:
+ description: "PerformanceHints contains a list of hints to Calico's
+ policy engine to help process the policy more efficiently. Hints
+ never change the enforcement behaviour of the policy. \n Currently,
+ the only available hint is \"AssumeNeededOnEveryNode\". When that
+ hint is set on a policy, Felix will act as if the policy matches
+ a local endpoint even if it does not. This is useful for \"preloading\"
+ any large static policies that are known to be used on every node.
+ If the policy is _not_ used on a particular node then the work done
+ to preload the policy (and to maintain it) is wasted."
+ items:
+ type: string
+ type: array
 preDNAT:
 description: PreDNAT indicates to apply the rules in this policy
 before any DNAT.
diff --git a/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml b/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml
index d5645d1a6b..d9a3999803 100644
--- a/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml
+++ b/pkg/crds/calico/crd.projectcalico.org_networkpolicies.yaml
@@ -784,6 +784,19 @@ spec:
 with identical order will be applied in alphanumerical order based
 on the Policy "Name".
 type: number
+ performanceHints:
+ description: "PerformanceHints contains a list of hints to Calico's
+ policy engine to help process the policy more efficiently. Hints
+ never change the enforcement behaviour of the policy. \n Currently,
+ the only available hint is \"AssumeNeededOnEveryNode\". When that
+ hint is set on a policy, Felix will act as if the policy matches
+ a local endpoint even if it does not. This is useful for \"preloading\"
+ any large static policies that are known to be used on every node.
+ If the policy is _not_ used on a particular node then the work done
+ to preload the policy (and to maintain it) is wasted."
+ items:
+ type: string
+ type: array
 selector:
 description: "The selector is an expression used to pick out
 the endpoints that the policy should be applied to.
\n Selector diff --git a/pkg/crds/enterprise/crd.projectcalico.org_securityeventwebhooks.yaml b/pkg/crds/enterprise/crd.projectcalico.org_securityeventwebhooks.yaml new file mode 100644 index 0000000000..f19eca84d2 --- /dev/null +++ b/pkg/crds/enterprise/crd.projectcalico.org_securityeventwebhooks.yaml @@ -0,0 +1,181 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: securityeventwebhooks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: SecurityEventWebhook + listKind: SecurityEventWebhookList + plural: securityeventwebhooks + singular: securityeventwebhook + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + config: + description: contains the SecurityEventWebhook's configuration associated + with the intended Consumer + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + description: Selects a key from a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + secretKeyRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + consumer: + description: 'indicates the SecurityEventWebhook intended consumer, + one of: Slack, Jira' + type: string + query: + description: defines the SecurityEventWebhook query to be executed + against fields of SecurityEvents + type: string + state: + description: 'defines the webhook desired state, one of: Enabled, + Disabled or Debug' + type: string + required: + - config + - consumer + - query + - state + type: object + status: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. 
For example, + \n \ttype FooStatus struct{ \t // Represents the observations of + a foo's current state. \t // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" \t // +patchMergeKey=type + \t // +patchStrategy=merge \t // +listType=map \t // +listMapKey=type + \t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n \t // other fields \t}" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned + from one status to another. This should be when the underlying + condition changed. If that is not known, then using the time + when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating details + about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers of specific + condition types may define expected values and meanings for this + field, and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be + empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/pkg/render/apiserver.go b/pkg/render/apiserver.go index 3fd7fb3524..90ec283937 100644 --- a/pkg/render/apiserver.go +++ b/pkg/render/apiserver.go @@ -528,6 +528,10 @@ func (c *apiServerComponent) calicoCustomResourcesClusterRole() *rbacv1.ClusterR "ipamblocks", "blockaffinities", "ipamconfigs", + "stagedkubernetesnetworkpolicies", + "stagednetworkpolicies", + "stagedglobalnetworkpolicies", + "tiers", }, Verbs: []string{ "get", @@ -1251,10 +1255,6 @@ func (c *apiServerComponent) tigeraCustomResourcesClusterRole() *rbacv1.ClusterR // Calico Enterprise backing storage. 
APIGroups: []string{"crd.projectcalico.org"}, Resources: []string{ - "stagedkubernetesnetworkpolicies", - "stagednetworkpolicies", - "stagedglobalnetworkpolicies", - "tiers", "licensekeys", "alertexceptions", "globalalerts", diff --git a/pkg/render/fluentd.go b/pkg/render/fluentd.go index 323b3a590c..ede6b06df1 100644 --- a/pkg/render/fluentd.go +++ b/pkg/render/fluentd.go @@ -17,7 +17,6 @@ package render import ( "crypto/x509" "fmt" - "strconv" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -81,12 +80,13 @@ const ( SyslogCAConfigMapName = "syslog-ca" // Constants for Linseed token volume mounting in managed clusters. - LinseedTokenVolumeName = "linseed-token" - LinseedTokenKey = "token" - LinseedTokenSubPath = "token" - LinseedTokenSecret = "%s-tigera-linseed-token" - LinseedVolumeMountPath = "/var/run/secrets/tigera.io/linseed/" - LinseedTokenPath = "/var/run/secrets/tigera.io/linseed/token" + LinseedTokenVolumeName = "linseed-token" + LinseedTokenKey = "token" + LinseedTokenSubPath = "token" + LinseedTokenSecret = "%s-tigera-linseed-token" + LinseedVolumeMountPath = "/var/run/secrets/tigera.io/linseed/" + LinseedTokenPath = "/var/run/secrets/tigera.io/linseed/token" + LinseedServiceAccountName = "tigera-linseed" fluentdName = "tigera-fluentd" fluentdWindowsName = "tigera-fluentd-windows" @@ -128,11 +128,12 @@ type SplunkCredential struct { } func Fluentd(cfg *FluentdConfiguration) Component { - return &fluentdComponent{ + fluentd := &fluentdComponent{ cfg: cfg, probeTimeout: 10, probePeriod: 60, } + return fluentd } type EksCloudwatchLogConfig struct { @@ -182,16 +183,17 @@ func (c *fluentdComponent) ResolveImages(is *operatorv1.ImageSet) error { path := c.cfg.Installation.ImagePath prefix := c.cfg.Installation.ImagePrefix - if c.cfg.OSType == rmeta.OSTypeWindows { + if c.cfg.OSType == rmeta.OSTypeWindows && c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { var err error - c.image, err = components.GetReference(components.ComponentFluentdWindows, reg, path, prefix, is) + c.image, err = components.GetReference(components.ComponentTigeraFluentdWindows, reg, path, prefix, is) return err } var err error - c.image, err = components.GetReference(components.ComponentFluentd, reg, path, prefix, is) - if err != nil { - return err + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + c.image, err = components.GetReference(components.ComponentTigeraFluentd, reg, path, prefix, is) + } else { + c.image, err = components.GetReference(components.ComponentCalicoFluentd, reg, path, prefix, is) } return err } @@ -256,8 +258,10 @@ func (c *fluentdComponent) path(path string) string { func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) { var objs, toDelete []client.Object objs = append(objs, CreateNamespace(LogCollectorNamespace, c.cfg.Installation.KubernetesProvider, PSSPrivileged)) - objs = append(objs, c.allowTigeraPolicy()) objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(LogCollectorNamespace, c.cfg.PullSecrets...)...)...) 
+ if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + objs = append(objs, c.allowTigeraPolicy()) + } objs = append(objs, c.metricsService()) if c.cfg.Installation.KubernetesProvider == operatorv1.ProviderGKE { @@ -276,7 +280,7 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) { if c.cfg.Filters != nil { objs = append(objs, c.filtersConfigMap()) } - if c.cfg.EKSConfig != nil && c.cfg.OSType == rmeta.OSTypeLinux { + if c.cfg.EKSConfig != nil && c.cfg.OSType == rmeta.OSTypeLinux && c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { if c.cfg.UsePSP { objs = append(objs, c.eksLogForwarderClusterRole(), @@ -302,7 +306,10 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) { } objs = append(objs, c.fluentdServiceAccount()) - objs = append(objs, c.packetCaptureApiRole(), c.packetCaptureApiRoleBinding()) + + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + objs = append(objs, c.packetCaptureApiRole(), c.packetCaptureApiRoleBinding()) + } objs = append(objs, c.daemonset()) return objs, toDelete @@ -619,16 +626,18 @@ func (c *fluentdComponent) envvars() []corev1.EnvVar { {Name: "LINSEED_ENABLED", Value: "true"}, {Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, linseedNS)}, {Name: "LINSEED_CA_PATH", Value: c.trustedBundlePath()}, + {Name: "FLUENT_UID", Value: "0"}, {Name: "TLS_KEY_PATH", Value: c.keyPath()}, {Name: "TLS_CRT_PATH", Value: c.certPath()}, - {Name: "FLUENT_UID", Value: "0"}, {Name: "FLOW_LOG_FILE", Value: c.path("/var/log/calico/flowlogs/flows.log")}, - {Name: "DNS_LOG_FILE", Value: c.path("/var/log/calico/dnslogs/dns.log")}, {Name: "FLUENTD_ES_SECURE", Value: "true"}, {Name: "NODENAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}}}, {Name: "LINSEED_TOKEN", Value: c.path(GetLinseedTokenPath(c.cfg.ManagedCluster))}, } + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + envs = append(envs, corev1.EnvVar{Name: "DNS_LOG_FILE", Value: c.path("/var/log/calico/dnslogs/dns.log")}) + } if c.cfg.Tenant != nil { envs = append(envs, corev1.EnvVar{Name: "TENANT_ID", Value: c.cfg.Tenant.Spec.ID}) } @@ -779,24 +788,6 @@ func (c *fluentdComponent) envvars() []corev1.EnvVar { } } - envs = append(envs, - corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_L7_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_REPLICAS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Replicas())}, - - corev1.EnvVar{Name: "ELASTIC_FLOWS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.FlowShards())}, - corev1.EnvVar{Name: "ELASTIC_DNS_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "ELASTIC_AUDIT_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "ELASTIC_BGP_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - 
corev1.EnvVar{Name: "ELASTIC_WAF_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "ELASTIC_L7_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - corev1.EnvVar{Name: "ELASTIC_RUNTIME_INDEX_SHARDS", Value: strconv.Itoa(c.cfg.ESClusterConfig.Shards())}, - ) - if c.SupportedOSType() != rmeta.OSTypeWindows { envs = append(envs, corev1.EnvVar{Name: "CA_CRT_PATH", Value: c.cfg.TrustedBundle.MountPath()}, @@ -963,31 +954,35 @@ func (c *fluentdComponent) fluentdClusterRoleBinding() *rbacv1.ClusterRoleBindin } func (c *fluentdComponent) fluentdClusterRole() *rbacv1.ClusterRole { + linseedRule := rbacv1.PolicyRule{ + // Add write access to Linseed APIs. + APIGroups: []string{"linseed.tigera.io"}, + Resources: []string{ + "flowlogs", + }, + Verbs: []string{"create"}, + } + + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + linseedRule.Resources = append(linseedRule.Resources, []string{ + "kube_auditlogs", + "ee_auditlogs", + "dnslogs", + "l7logs", + "events", + "bgplogs", + "waflogs", + "runtimereports", + }...) + } + role := &rbacv1.ClusterRole{ TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}, ObjectMeta: metav1.ObjectMeta{ Name: c.fluentdName(), }, - Rules: []rbacv1.PolicyRule{ - { - // Add write access to Linseed APIs. - APIGroups: []string{"linseed.tigera.io"}, - Resources: []string{ - "flowlogs", - "kube_auditlogs", - "ee_auditlogs", - "dnslogs", - "l7logs", - "events", - "bgplogs", - "waflogs", - "runtimereports", - }, - Verbs: []string{"create"}, - }, - }, + Rules: []rbacv1.PolicyRule{linseedRule}, } - if c.cfg.UsePSP { // Allow access to the pod security policy in case this is enforced on the cluster role.Rules = append(role.Rules, rbacv1.PolicyRule{ diff --git a/pkg/render/fluentd_test.go b/pkg/render/fluentd_test.go index 674999c80e..dca7746ec9 100644 --- a/pkg/render/fluentd_test.go +++ b/pkg/render/fluentd_test.go @@ -68,6 +68,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { OSType: rmeta.OSTypeLinux, Installation: &operatorv1.InstallationSpec{ KubernetesProvider: operatorv1.ProviderNone, + Variant: operatorv1.TigeraSecureEnterprise, }, FluentdKeyPair: metricsSecret, TrustedBundle: certificateManager.CreateTrustedBundle(), @@ -221,6 +222,98 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { Expect(ms.Spec.ClusterIP).To(Equal("None"), "metrics service should be headless to prevent kube-proxy from rendering too many iptables rules") }) + It("should render with a default configuration for calico OSS", func() { + expectedResources := []struct { + name string + ns string + group string + version string + kind string + }{ + {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, + {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, + {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, + {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, + {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, + {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, + {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + } + + // Should render the correct resources. 
+ cfg.Installation.Variant = operatorv1.Calico + component := render.Fluentd(cfg) + resources, _ := component.Objects() + Expect(len(resources)).To(Equal(len(expectedResources))) + + i := 0 + for _, expectedRes := range expectedResources { + rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) + i++ + } + + // Check the namespace. + ns := rtest.GetResource(resources, "tigera-fluentd", "", "", "v1", "Namespace").(*corev1.Namespace) + Expect(ns.Labels["pod-security.kubernetes.io/enforce"]).To(Equal("privileged")) + Expect(ns.Labels["pod-security.kubernetes.io/enforce-version"]).To(Equal("latest")) + + ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) + Expect(ds.Spec.Template.Spec.Volumes[0].VolumeSource.HostPath.Path).To(Equal("/var/log/calico")) + Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) + envs := ds.Spec.Template.Spec.Containers[0].Env + + Expect(envs).Should(ContainElements( + corev1.EnvVar{Name: "LINSEED_ENABLED", Value: "true"}, + corev1.EnvVar{Name: "LINSEED_ENDPOINT", Value: "https://tigera-linseed.tigera-elasticsearch.svc"}, + corev1.EnvVar{Name: "LINSEED_CA_PATH", Value: "/etc/pki/tls/certs/tigera-ca-bundle.crt"}, + corev1.EnvVar{Name: "TLS_KEY_PATH", Value: "/tigera-fluentd-prometheus-tls/tls.key"}, + corev1.EnvVar{Name: "TLS_CRT_PATH", Value: "/tigera-fluentd-prometheus-tls/tls.crt"}, + corev1.EnvVar{Name: "FLUENT_UID", Value: "0"}, + corev1.EnvVar{Name: "FLOW_LOG_FILE", Value: "/var/log/calico/flowlogs/flows.log"}, + corev1.EnvVar{Name: "FLUENTD_ES_SECURE", Value: "true"}, + corev1.EnvVar{ + Name: "NODENAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, + }, + }, + corev1.EnvVar{Name: "LINSEED_TOKEN", Value: "/var/run/secrets/kubernetes.io/serviceaccount/token"}, + )) + + container := ds.Spec.Template.Spec.Containers[0] + + Expect(container.ReadinessProbe.Exec.Command).To(ConsistOf([]string{"sh", "-c", "/bin/readiness.sh"})) + Expect(container.ReadinessProbe.TimeoutSeconds).To(BeEquivalentTo(10)) + Expect(container.ReadinessProbe.PeriodSeconds).To(BeEquivalentTo(60)) + + Expect(container.LivenessProbe.Exec.Command).To(ConsistOf([]string{"sh", "-c", "/bin/liveness.sh"})) + Expect(container.LivenessProbe.TimeoutSeconds).To(BeEquivalentTo(10)) + Expect(container.LivenessProbe.PeriodSeconds).To(BeEquivalentTo(60)) + + Expect(container.StartupProbe.Exec.Command).To(ConsistOf([]string{"sh", "-c", "/bin/liveness.sh"})) + Expect(container.StartupProbe.TimeoutSeconds).To(BeEquivalentTo(10)) + Expect(container.StartupProbe.PeriodSeconds).To(BeEquivalentTo(60)) + Expect(container.StartupProbe.FailureThreshold).To(BeEquivalentTo(10)) + + Expect(*container.SecurityContext.AllowPrivilegeEscalation).To(BeFalse()) + Expect(*container.SecurityContext.Privileged).To(BeFalse()) + Expect(*container.SecurityContext.RunAsGroup).To(BeEquivalentTo(0)) + Expect(*container.SecurityContext.RunAsNonRoot).To(BeFalse()) + Expect(*container.SecurityContext.RunAsUser).To(BeEquivalentTo(0)) + Expect(container.SecurityContext.Capabilities).To(Equal( + &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + )) + Expect(container.SecurityContext.SeccompProfile).To(Equal( + &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + })) + + // The metrics service should have the correct configuration. 
+ ms := rtest.GetResource(resources, render.FluentdMetricsService, render.LogCollectorNamespace, "", "v1", "Service").(*corev1.Service) + Expect(ms.Spec.ClusterIP).To(Equal("None"), "metrics service should be headless to prevent kube-proxy from rendering too many iptables rules") + }) + It("should render with a resource quota for provider GKE", func() { cfg.Installation.KubernetesProvider = operatorv1.ProviderGKE @@ -831,6 +924,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { cfg.Installation = &operatorv1.InstallationSpec{ KubernetesProvider: operatorv1.ProviderEKS, ControlPlaneTolerations: []corev1.Toleration{t}, + Variant: operatorv1.TigeraSecureEnterprise, } component := render.Fluentd(cfg) resources, _ := component.Objects() diff --git a/pkg/render/guardian.go b/pkg/render/guardian.go index 916fd6b9c7..97d86ee390 100644 --- a/pkg/render/guardian.go +++ b/pkg/render/guardian.go @@ -103,7 +103,11 @@ func (c *GuardianComponent) ResolveImages(is *operatorv1.ImageSet) error { path := c.cfg.Installation.ImagePath prefix := c.cfg.Installation.ImagePrefix var err error - c.image, err = components.GetReference(components.ComponentGuardian, reg, path, prefix, is) + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + c.image, err = components.GetReference(components.ComponentGuardian, reg, path, prefix, is) + } else { + c.image, err = components.GetReference(components.ComponentCalicoGuardian, reg, path, prefix, is) + } return err } @@ -131,12 +135,17 @@ func (c *GuardianComponent) Objects() ([]client.Object, []client.Object) { managerServiceAccount(ManagerNamespace), managerClusterRole(false, true, c.cfg.UsePSP, c.cfg.Installation.KubernetesProvider), managerClusterRoleBinding([]string{ManagerNamespace}), - managerClusterWideSettingsGroup(), - managerUserSpecificSettingsGroup(), - managerClusterWideTigeraLayer(), - managerClusterWideDefaultView(), ) + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + objs = append(objs, + managerClusterWideSettingsGroup(), + managerUserSpecificSettingsGroup(), + managerClusterWideTigeraLayer(), + managerClusterWideDefaultView(), + ) + } + if c.cfg.UsePSP { objs = append(objs, c.podSecurityPolicy()) } diff --git a/pkg/render/guardian_test.go b/pkg/render/guardian_test.go index 65a76ba62a..cf551debfa 100644 --- a/pkg/render/guardian_test.go +++ b/pkg/render/guardian_test.go @@ -42,7 +42,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var _ = Describe("Rendering tests", func() { +var _ = Describe("Guardian Rendering tests", func() { var cfg *render.GuardianConfiguration var g render.Component var resources []client.Object @@ -84,6 +84,90 @@ var _ = Describe("Rendering tests", func() { } } + Context("Guardian component OSS", func() { + renderGuardian := func(i operatorv1.InstallationSpec) { + cfg = createGuardianConfig(i, "127.0.0.1:1234", false) + g = render.Guardian(cfg) + Expect(g.ResolveImages(nil)).To(BeNil()) + resources, _ = g.Objects() + } + + BeforeEach(func() { + renderGuardian(operatorv1.InstallationSpec{Registry: "my-reg/", Variant: operatorv1.Calico}) + }) + + It("should render all resources for a managed OSS cluster", func() { + expectedResources := []struct { + name string + ns string + group string + version string + kind string + }{ + {name: render.GuardianNamespace, ns: "", group: "", version: "v1", kind: "Namespace"}, + {name: "pull-secret", ns: render.GuardianNamespace, group: "", version: "v1", kind: "Secret"}, + {name: render.GuardianServiceAccountName, ns: 
render.GuardianNamespace, group: "", version: "v1", kind: "ServiceAccount"},
+ {name: render.GuardianClusterRoleName, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"},
+ {name: render.GuardianClusterRoleBindingName, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"},
+ {name: render.GuardianDeploymentName, ns: render.GuardianNamespace, group: "apps", version: "v1", kind: "Deployment"},
+ {name: render.GuardianServiceName, ns: render.GuardianNamespace, group: "", version: "", kind: ""},
+ {name: render.GuardianSecretName, ns: render.GuardianNamespace, group: "", version: "v1", kind: "Secret"},
+ {name: "tigera-ca-bundle", ns: render.GuardianNamespace, group: "", version: "v1", kind: "ConfigMap"},
+ {name: render.ManagerNamespace, ns: "", group: "", version: "v1", kind: "Namespace"},
+ {name: render.ManagerServiceAccount, ns: render.ManagerNamespace, group: "", version: "v1", kind: "ServiceAccount"},
+ {name: render.ManagerClusterRole, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"},
+ {name: render.ManagerClusterRoleBinding, ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"},
+ }
+ Expect(len(resources)).To(Equal(len(expectedResources)))
+ for i, expectedRes := range expectedResources {
+ rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind)
+ }
+
+ deployment := rtest.GetResource(resources, render.GuardianDeploymentName, render.GuardianNamespace, "apps", "v1", "Deployment").(*appsv1.Deployment)
+ Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1))
+ Expect(deployment.Spec.Template.Spec.Containers[0].Image).Should(Equal("my-reg/tigera/guardian:" + components.ComponentCalicoGuardian.Version))
+
+ Expect(*deployment.Spec.Template.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation).To(BeFalse())
+ Expect(*deployment.Spec.Template.Spec.Containers[0].SecurityContext.Privileged).To(BeFalse())
+ Expect(*deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsGroup).To(BeEquivalentTo(10001))
+ Expect(*deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsNonRoot).To(BeTrue())
+ Expect(*deployment.Spec.Template.Spec.Containers[0].SecurityContext.RunAsUser).To(BeEquivalentTo(10001))
+ Expect(deployment.Spec.Template.Spec.Containers[0].SecurityContext.SeccompProfile).To(Equal(
+ &corev1.SeccompProfile{
+ Type: corev1.SeccompProfileTypeRuntimeDefault,
+ }))
+ Expect(deployment.Spec.Template.Spec.Containers[0].SecurityContext.Capabilities).To(Equal(
+ &corev1.Capabilities{
+ Drop: []corev1.Capability{"ALL"},
+ },
+ ))
+
+ // Check the namespace.
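+ // Guardian runs as non-root with all capabilities dropped, so its namespace
+ // can carry the "restricted" Pod Security Standard labels asserted below.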
+ ns := rtest.GetResource(resources, "tigera-guardian", "", "", "v1", "Namespace").(*corev1.Namespace) + Expect(ns.Labels["pod-security.kubernetes.io/enforce"]).To(Equal("restricted")) + Expect(ns.Labels["pod-security.kubernetes.io/enforce-version"]).To(Equal("latest")) + + crb := rtest.GetResource(resources, render.ManagerClusterRoleBinding, "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding").(*rbacv1.ClusterRoleBinding) + Expect(crb.Subjects).To(Equal([]rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: render.ManagerServiceAccount, + Namespace: render.ManagerNamespace, + }})) + }) + + It("should render controlPlaneTolerations", func() { + t := corev1.Toleration{ + Key: "foo", + Operator: corev1.TolerationOpEqual, + Value: "bar", + } + renderGuardian(operatorv1.InstallationSpec{ + ControlPlaneTolerations: []corev1.Toleration{t}, + }) + deployment := rtest.GetResource(resources, render.GuardianDeploymentName, render.GuardianNamespace, "apps", "v1", "Deployment").(*appsv1.Deployment) + Expect(deployment.Spec.Template.Spec.Tolerations).Should(ContainElements(append(rmeta.TolerateCriticalAddonsAndControlPlane, t))) + }) + }) Context("Guardian component", func() { renderGuardian := func(i operatorv1.InstallationSpec) { cfg = createGuardianConfig(i, "127.0.0.1:1234", false) @@ -93,7 +177,7 @@ var _ = Describe("Rendering tests", func() { } BeforeEach(func() { - renderGuardian(operatorv1.InstallationSpec{Registry: "my-reg/"}) + renderGuardian(operatorv1.InstallationSpec{Registry: "my-reg/", Variant: operatorv1.TigeraSecureEnterprise}) }) It("should render all resources for a managed cluster", func() { diff --git a/pkg/render/kubecontrollers/kube-controllers.go b/pkg/render/kubecontrollers/kube-controllers.go index 46b520ddc5..1a86262581 100644 --- a/pkg/render/kubecontrollers/kube-controllers.go +++ b/pkg/render/kubecontrollers/kube-controllers.go @@ -332,6 +332,17 @@ func kubeControllersRoleCommonRules(cfg *KubeControllersConfiguration, kubeContr Resources: []string{"kubecontrollersconfigurations"}, Verbs: []string{"get", "create", "update", "watch"}, }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps", "secrets"}, + Verbs: []string{"watch", "list", "get", "update", "create", "delete"}, + }, + { + // calico-kube-controllers requires tiers create + APIGroups: []string{"crd.projectcalico.org"}, + Resources: []string{"tiers"}, + Verbs: []string{"create"}, + }, } if cfg.UsePSP { @@ -349,23 +360,12 @@ func kubeControllersRoleCommonRules(cfg *KubeControllersConfiguration, kubeContr func kubeControllersRoleEnterpriseCommonRules(cfg *KubeControllersConfiguration) []rbacv1.PolicyRule { rules := []rbacv1.PolicyRule{ - { - APIGroups: []string{""}, - Resources: []string{"configmaps", "secrets"}, - Verbs: []string{"watch", "list", "get", "update", "create", "delete"}, - }, { // Needed to validate the license APIGroups: []string{"projectcalico.org"}, Resources: []string{"licensekeys"}, Verbs: []string{"get", "watch", "list"}, }, - { - // calico-kube-controllers requires tiers create - APIGroups: []string{"crd.projectcalico.org"}, - Resources: []string{"tiers"}, - Verbs: []string{"create"}, - }, { // Needed to validate the license APIGroups: []string{"crd.projectcalico.org"}, diff --git a/pkg/render/logstorage.go b/pkg/render/logstorage.go index 2d3e5ae37d..5d6a61da0f 100644 --- a/pkg/render/logstorage.go +++ b/pkg/render/logstorage.go @@ -2062,10 +2062,13 @@ func (m *managedClusterLogStorage) Objects() (objsToCreate []client.Object, objs role, binding := 
m.linseedExternalRoleAndBinding() toCreate = append(toCreate, CreateNamespace(ElasticsearchNamespace, m.cfg.Installation.KubernetesProvider, PSSPrivileged), - m.elasticsearchExternalService(), m.linseedExternalService(), role, binding, ) + + if m.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + toCreate = append(toCreate, m.elasticsearchExternalService()) + } return toCreate, nil } diff --git a/pkg/render/logstorage_test.go b/pkg/render/logstorage_test.go index 656e1d06bb..c8aaa8fddb 100644 --- a/pkg/render/logstorage_test.go +++ b/pkg/render/logstorage_test.go @@ -964,9 +964,8 @@ var _ = Describe("Elasticsearch rendering tests", func() { }) }) - Context("Managed cluster", func() { + Context("Managed cluster OSS", func() { var cfg *render.ManagedClusterLogStorageConfiguration - var managementClusterConnection *operatorv1.ManagementClusterConnection BeforeEach(func() { replicas := int32(1) @@ -974,8 +973,8 @@ var _ = Describe("Elasticsearch rendering tests", func() { ControlPlaneReplicas: &replicas, KubernetesProvider: operatorv1.ProviderNone, Registry: "testregistry.com/", + Variant: operatorv1.Calico, } - managementClusterConnection = &operatorv1.ManagementClusterConnection{} cfg = &render.ManagedClusterLogStorageConfiguration{ Installation: installation, ClusterDomain: "cluster.local", @@ -986,11 +985,45 @@ var _ = Describe("Elasticsearch rendering tests", func() { It("creates Managed cluster logstorage components", func() { expectedCreateResources := []resourceTestObj{ {render.ElasticsearchNamespace, "", &corev1.Namespace{}, nil}, - {render.ESGatewayServiceName, render.ElasticsearchNamespace, &corev1.Service{}, func(resource runtime.Object) { + {render.LinseedServiceName, render.ElasticsearchNamespace, &corev1.Service{}, func(resource runtime.Object) { svc := resource.(*corev1.Service) Expect(svc.Spec.Type).Should(Equal(corev1.ServiceTypeExternalName)) Expect(svc.Spec.ExternalName).Should(Equal(fmt.Sprintf("%s.%s.svc.%s", render.GuardianServiceName, render.GuardianNamespace, dns.DefaultClusterDomain))) }}, + {"tigera-linseed", "", &rbacv1.ClusterRole{}, nil}, + {"tigera-linseed", "tigera-fluentd", &rbacv1.RoleBinding{}, nil}, + } + component := render.NewManagedClusterLogStorage(cfg) + createResources, deleteResources := component.Objects() + compareResources(createResources, expectedCreateResources) + compareResources(deleteResources, []resourceTestObj{}) + }) + }) + }) + + Context("Managed cluster", func() { + var cfg *render.ManagedClusterLogStorageConfiguration + var managementClusterConnection *operatorv1.ManagementClusterConnection + + BeforeEach(func() { + replicas := int32(1) + installation := &operatorv1.InstallationSpec{ + ControlPlaneReplicas: &replicas, + KubernetesProvider: operatorv1.ProviderNone, + Registry: "testregistry.com/", + Variant: operatorv1.TigeraSecureEnterprise, + } + managementClusterConnection = &operatorv1.ManagementClusterConnection{} + cfg = &render.ManagedClusterLogStorageConfiguration{ + Installation: installation, + ClusterDomain: "cluster.local", + } + }) + + Context("Initial creation", func() { + It("creates Managed cluster logstorage components", func() { + expectedCreateResources := []resourceTestObj{ + {render.ElasticsearchNamespace, "", &corev1.Namespace{}, nil}, {render.LinseedServiceName, render.ElasticsearchNamespace, &corev1.Service{}, func(resource runtime.Object) { svc := resource.(*corev1.Service) Expect(svc.Spec.Type).Should(Equal(corev1.ServiceTypeExternalName)) @@ -998,6 +1031,11 @@ var _ = Describe("Elasticsearch 
rendering tests", func() { }}, {"tigera-linseed", "", &rbacv1.ClusterRole{}, nil}, {"tigera-linseed", "tigera-fluentd", &rbacv1.RoleBinding{}, nil}, + {render.ESGatewayServiceName, render.ElasticsearchNamespace, &corev1.Service{}, func(resource runtime.Object) { + svc := resource.(*corev1.Service) + Expect(svc.Spec.Type).Should(Equal(corev1.ServiceTypeExternalName)) + Expect(svc.Spec.ExternalName).Should(Equal(fmt.Sprintf("%s.%s.svc.%s", render.GuardianServiceName, render.GuardianNamespace, dns.DefaultClusterDomain))) + }}, } component := render.NewManagedClusterLogStorage(cfg) createResources, deleteResources := component.Objects() diff --git a/pkg/render/node.go b/pkg/render/node.go index f692938115..ab8d3d1736 100644 --- a/pkg/render/node.go +++ b/pkg/render/node.go @@ -436,6 +436,7 @@ func (c *nodeComponent) nodeRole() *rbacv1.ClusterRole { "stagedkubernetesnetworkpolicies", "stagednetworkpolicies", "networksets", + "tiers", }, Verbs: []string{"get", "list", "watch"}, }, @@ -464,6 +465,7 @@ func (c *nodeComponent) nodeRole() *rbacv1.ClusterRole { "clusterinformations", "felixconfigurations", "ippools", + "tiers", }, Verbs: []string{"create", "update"}, }, @@ -1055,15 +1057,12 @@ func (c *nodeComponent) nodeVolumes() []corev1.Volume { volumes = append(volumes, corev1.Volume{Name: "cni-log-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: cniLogDir}}}) } - // Override with Tigera-specific config. - if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { - // Add volume for calico logs. - calicoLogVol := corev1.Volume{ - Name: "var-log-calico", - VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}, - } - volumes = append(volumes, calicoLogVol) + // Add volume for calico logs. + calicoLogVol := corev1.Volume{ + Name: "var-log-calico", + VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}, } + volumes = append(volumes, calicoLogVol) // Create and append flexvolume if c.cfg.Installation.FlexVolumePath != "None" { @@ -1302,12 +1301,10 @@ func (c *nodeComponent) nodeVolumeMounts() []corev1.VolumeMount { if c.vppDataplaneEnabled() { nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/usr/local/bin/felix-plugins", Name: "felix-plugins", ReadOnly: true}) } - if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { - extraNodeMounts := []corev1.VolumeMount{ - {MountPath: "/var/log/calico", Name: "var-log-calico"}, - } - nodeVolumeMounts = append(nodeVolumeMounts, extraNodeMounts...) 
- } else if c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { + + nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/var/log/calico", Name: "var-log-calico"}) + + if c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { cniLogMount := corev1.VolumeMount{MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false} nodeVolumeMounts = append(nodeVolumeMounts, cniLogMount) } diff --git a/pkg/render/node_test.go b/pkg/render/node_test.go index bcd9a7fc78..7d6a76ada4 100644 --- a/pkg/render/node_test.go +++ b/pkg/render/node_test.go @@ -368,6 +368,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "cni-bin-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/opt/cni/bin"}}}, {Name: "cni-net-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/cni/net.d"}}}, {Name: "cni-log-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico/cni"}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, { Name: "tigera-ca-bundle", @@ -403,6 +404,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, {MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, } Expect(ds.Spec.Template.Spec.Containers[0].VolumeMounts).To(ConsistOf(expectedNodeVolumeMounts)) @@ -617,6 +619,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "cni-log-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico/cni"}}}, {Name: "sys-fs", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/sys/fs", Type: &dirOrCreate}}}, {Name: "bpffs", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/sys/fs/bpf", Type: &dirMustExist}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, {Name: "nodeproc", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/proc"}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, { @@ -654,6 +657,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, {MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false}, {MountPath: "/sys/fs/bpf", Name: "bpffs"}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, } Expect(ds.Spec.Template.Spec.Containers[0].VolumeMounts).To(ConsistOf(expectedNodeVolumeMounts)) @@ -939,6 +943,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "cni-net-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/cni/net.d"}}}, {Name: "cni-log-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico/cni"}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, + {Name: 
"var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, { Name: "tigera-ca-bundle", VolumeSource: corev1.VolumeSource{ @@ -974,6 +979,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, {MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, } Expect(ds.Spec.Template.Spec.Containers[0].VolumeMounts).To(ConsistOf(expectedNodeVolumeMounts)) @@ -1152,6 +1158,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "cni-net-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/cni/net.d"}}}, {Name: "cni-log-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico/cni"}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, { Name: "tigera-ca-bundle", VolumeSource: corev1.VolumeSource{ @@ -1182,6 +1189,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/run/xtables.lock", Name: "xtables-lock"}, {MountPath: "/var/run/calico", Name: "var-run-calico"}, {MountPath: "/var/lib/calico", Name: "var-lib-calico"}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, {MountPath: "/var/run/nodeagent", Name: "policysync"}, {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, @@ -1296,6 +1304,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "var-lib-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/calico"}}}, {Name: "xtables-lock", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/run/xtables.lock", Type: &fileOrCreate}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, { Name: "tigera-ca-bundle", VolumeSource: corev1.VolumeSource{ @@ -1326,6 +1335,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/var/run/calico", Name: "var-run-calico"}, {MountPath: "/var/lib/calico", Name: "var-lib-calico"}, {MountPath: "/var/run/nodeagent", Name: "policysync"}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, } @@ -1561,6 +1571,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "lib-modules", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/lib/modules"}}}, {Name: "var-run-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/calico"}}}, {Name: "var-lib-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/calico"}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: 
"/var/log/calico", Type: &dirOrCreate}}}, {Name: "xtables-lock", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/run/xtables.lock", Type: &fileOrCreate}}}, {Name: "cni-bin-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/opt/cni/bin"}}}, {Name: "cni-net-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/etc/cni/net.d"}}}, @@ -1600,6 +1611,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, {MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, } Expect(ds.Spec.Template.Spec.Containers[0].VolumeMounts).To(ConsistOf(expectedNodeVolumeMounts)) @@ -1705,6 +1717,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "lib-modules", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/lib/modules"}}}, {Name: "var-run-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/calico"}}}, {Name: "var-lib-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/calico"}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, {Name: "xtables-lock", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/run/xtables.lock", Type: &fileOrCreate}}}, {Name: "policysync", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/nodeagent", Type: &dirOrCreate}}}, { @@ -1736,6 +1749,7 @@ var _ = Describe("Node rendering tests", func() { {MountPath: "/run/xtables.lock", Name: "xtables-lock"}, {MountPath: "/var/run/calico", Name: "var-run-calico"}, {MountPath: "/var/lib/calico", Name: "var-lib-calico"}, + {MountPath: "/var/log/calico", Name: "var-log-calico"}, {MountPath: "/var/run/nodeagent", Name: "policysync"}, {MountPath: "/etc/pki/tls/certs", Name: "tigera-ca-bundle", ReadOnly: true}, {MountPath: "/node-certs", Name: render.NodeTLSSecretName, ReadOnly: true}, @@ -1806,6 +1820,7 @@ var _ = Describe("Node rendering tests", func() { {Name: "lib-modules", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/lib/modules"}}}, {Name: "var-run-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/calico"}}}, {Name: "var-lib-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/calico"}}}, + {Name: "var-log-calico", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/log/calico", Type: &dirOrCreate}}}, {Name: "xtables-lock", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/run/xtables.lock", Type: &fileOrCreate}}}, {Name: "cni-bin-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/lib/cni/bin"}}}, {Name: "cni-net-dir", VolumeSource: corev1.VolumeSource{HostPath: &corev1.HostPathVolumeSource{Path: "/var/run/multus/cni/net.d"}}}, diff --git a/pkg/render/typha.go b/pkg/render/typha.go index c68de2e732..fcb97ea635 100644 --- a/pkg/render/typha.go +++ b/pkg/render/typha.go @@ -259,6 +259,10 @@ func (c *typhaComponent) typhaRole() *rbacv1.ClusterRole { "ipreservations", "networkpolicies", "networksets", + 
"stagedglobalnetworkpolicies", + "stagedkubernetesnetworkpolicies", + "stagednetworkpolicies", + "tiers", }, Verbs: []string{"get", "list", "watch"}, }, @@ -279,6 +283,7 @@ func (c *typhaComponent) typhaRole() *rbacv1.ClusterRole { "clusterinformations", "felixconfigurations", "ippools", + "tiers", }, Verbs: []string{"create", "update"}, }, From 012808168d76e0d5f605c005d1e1953cd7dfa923 Mon Sep 17 00:00:00 2001 From: sridhar Date: Tue, 12 Sep 2023 10:02:06 -0700 Subject: [PATCH 2/3] Address review comments --- .../crd.projectcalico.org/v1/felixconfig.go | 3 + .../v1/zz_generated.deepcopy.go | 5 + .../clusterconnection_controller.go | 9 +- .../clusterconnection_controller_test.go | 4 +- .../logcollector/logcollector_controller.go | 290 ++++++++------- .../logcollector_controller_test.go | 18 +- .../managed_cluster_controller_test.go | 9 +- pkg/render/fluentd.go | 15 +- pkg/render/fluentd_test.go | 331 +++++++----------- pkg/render/guardian_test.go | 7 +- pkg/render/logstorage.go | 17 - pkg/render/logstorage_test.go | 9 +- pkg/render/node.go | 14 +- pkg/render/typha.go | 12 - 14 files changed, 310 insertions(+), 433 deletions(-) diff --git a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go index 28a8abd5d3..e244501fe3 100644 --- a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go +++ b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go @@ -402,6 +402,9 @@ type FelixConfigurationSpec struct { // FlowLogsEnableNetworkSets enables flow logs reporting for global network sets. [Default: false] FlowLogsEnableNetworkSets *bool `json:"flowLogsEnableNetworkSets,omitempty"` + + // FlowLogsFileIncludeLabels controls whether labels are reported in the flow logs. [Default: false] + FlowLogsFileIncludeLabels *bool `json:"flowLogsFileIncludeLabels,omitempty"` } type RouteTableRange struct { diff --git a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go index a7f8193165..07b5fc344e 100644 --- a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go +++ b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go @@ -723,6 +723,11 @@ func (in *FelixConfigurationSpec) DeepCopyInto(out *FelixConfigurationSpec) { *out = new(bool) **out = **in } + if in.FlowLogsFileIncludeLabels != nil { + in, out := &in.FlowLogsFileIncludeLabels, &out.FlowLogsFileIncludeLabels + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfigurationSpec. diff --git a/pkg/controller/clusterconnection/clusterconnection_controller.go b/pkg/controller/clusterconnection/clusterconnection_controller.go index 5dbb308c18..efaebdf0eb 100644 --- a/pkg/controller/clusterconnection/clusterconnection_controller.go +++ b/pkg/controller/clusterconnection/clusterconnection_controller.go @@ -128,6 +128,11 @@ func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool) return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, render.GuardianSecretName, err) } + // Watch for changes to the secrets associated with Prometheus. 
+	if err = utils.AddSecretsWatch(c, monitor.PrometheusTLSSecretName, common.OperatorNamespace()); err != nil {
+		return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, monitor.PrometheusTLSSecretName, err)
+	}
+
 	if enterpriseCRDExists {
 		// Watch for changes to primary resource ManagementCluster
 		err := c.Watch(&source.Kind{Type: &operatorv1.ManagementCluster{}}, &handler.EnqueueRequestForObject{})
@@ -138,10 +143,6 @@ func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool)
 		if err = utils.AddSecretsWatch(c, render.PacketCaptureServerCert, common.OperatorNamespace()); err != nil {
 			return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, render.PacketCaptureServerCert, err)
 		}
-		// Watch for changes to the secrets associated with Prometheus.
-		if err = utils.AddSecretsWatch(c, monitor.PrometheusTLSSecretName, common.OperatorNamespace()); err != nil {
-			return fmt.Errorf("%s failed to watch Secret resource %s: %w", controllerName, monitor.PrometheusTLSSecretName, err)
-		}
 	}
 
 	if err = utils.AddSecretsWatch(c, certificatemanagement.CASecretName, common.OperatorNamespace()); err != nil {
diff --git a/pkg/controller/clusterconnection/clusterconnection_controller_test.go b/pkg/controller/clusterconnection/clusterconnection_controller_test.go
index 9fc4e92c8f..5376c5ec58 100644
--- a/pkg/controller/clusterconnection/clusterconnection_controller_test.go
+++ b/pkg/controller/clusterconnection/clusterconnection_controller_test.go
@@ -48,7 +48,7 @@ import (
 	"github.com/tigera/operator/test"
 )
 
-var _ = Describe("ManagementClusterConnection controller tests", func() {
+var _ = Describe("ManagementClusterConnection controller tests (Calico Enterprise)", func() {
 	var c client.Client
 	var ctx context.Context
 	var cfg *operatorv1.ManagementClusterConnection
@@ -513,7 +513,7 @@ var _ = Describe("ManagementClusterConnection controller tests", func() {
 	})
 })
 
-var _ = Describe("ManagementClusterConnection controller tests(OSS)", func() {
+var _ = Describe("ManagementClusterConnection controller tests (Calico)", func() {
 	var c client.Client
 	var ctx context.Context
 	var cfg *operatorv1.ManagementClusterConnection
diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go
index 414054b8c3..a3966827f8 100644
--- a/pkg/controller/logcollector/logcollector_controller.go
+++ b/pkg/controller/logcollector/logcollector_controller.go
@@ -80,66 +80,23 @@ func Add(mgr manager.Manager, opts options.AddOptions) error {
 		return fmt.Errorf("Failed to create logcollector-controller: %v", err)
 	}
 
-	if opts.EnterpriseCRDExists {
-		k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig())
-		if err != nil {
-			log.Error(err, "Failed to establish a connection to k8s")
-			return err
-		}
-		go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady)
-		go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, licenseAPIReady)
-		go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{
-			{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace},
-		})
-	}
-
-	if opts.MultiTenant {
-		if err = controller.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, &handler.EnqueueRequestForObject{}); err != nil {
-			return fmt.Errorf("logcollector-controller failed to watch Tenant resource: %w", err)
-		}
-	}
-
-	return add(mgr, controller, opts.EnterpriseCRDExists)
-}
-
-// newReconciler returns a new reconcile.Reconciler
-func 
newReconciler(mgr manager.Manager, opts options.AddOptions, licenseAPIReady *utils.ReadyFlag, tierWatchReady *utils.ReadyFlag) reconcile.Reconciler { - c := &ReconcileLogCollector{ - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - provider: opts.DetectedProvider, - status: status.New(mgr.GetClient(), "log-collector", opts.KubernetesVersion), - clusterDomain: opts.ClusterDomain, - licenseAPIReady: licenseAPIReady, - tierWatchReady: tierWatchReady, - usePSP: opts.UsePSP, - multiTenant: opts.MultiTenant, - } - c.status.Run(opts.ShutdownContext) - return c -} - -// add adds watches for resources that are available at startup -func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool) error { - var err error - // Watch for changes to primary resource LogCollector - err = c.Watch(&source.Kind{Type: &operatorv1.LogCollector{}}, &handler.EnqueueRequestForObject{}) + err = controller.Watch(&source.Kind{Type: &operatorv1.LogCollector{}}, &handler.EnqueueRequestForObject{}) if err != nil { return fmt.Errorf("logcollector-controller failed to watch primary resource: %v", err) } - err = utils.AddAPIServerWatch(c) + err = utils.AddAPIServerWatch(controller) if err != nil { return fmt.Errorf("logcollector-controller failed to watch APIServer resource: %v", err) } - if err = utils.AddNetworkWatch(c); err != nil { + if err = utils.AddNetworkWatch(controller); err != nil { log.V(5).Info("Failed to create network watch", "err", err) return fmt.Errorf("logcollector-controller failed to watch Tigera network resource: %v", err) } - if err = imageset.AddImageSetWatch(c); err != nil { + if err = imageset.AddImageSetWatch(controller); err != nil { return fmt.Errorf("logcollector-controller failed to watch ImageSet: %w", err) } @@ -147,42 +104,74 @@ func add(mgr manager.Manager, c controller.Controller, enterpriseCRDExists bool) relasticsearch.PublicCertSecret, monitor.PrometheusTLSSecretName, render.FluentdPrometheusTLSSecretName, render.TigeraLinseedSecret, render.VoltronLinseedPublicCert, } { - if err = utils.AddSecretsWatch(c, secretName, common.OperatorNamespace()); err != nil { + if err = utils.AddSecretsWatch(controller, secretName, common.OperatorNamespace()); err != nil { return fmt.Errorf("log-collector-controller failed to watch the Secret resource(%s): %v", secretName, err) } } - - if enterpriseCRDExists { + if opts.EnterpriseCRDExists { + k8sClient, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + log.Error(err, "Failed to establish a connection to k8s") + return err + } + go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) + go utils.WaitToAddLicenseKeyWatch(controller, k8sClient, log, licenseAPIReady) + go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, []types.NamespacedName{ + {Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, + }) for _, secretName := range []string{ render.ElasticsearchEksLogForwarderUserSecret, render.S3FluentdSecretName, render.EksLogForwarderSecret, render.SplunkFluentdTokenSecretName, render.SplunkFluentdCertificateSecretName, } { - if err = utils.AddSecretsWatch(c, secretName, common.OperatorNamespace()); err != nil { + if err = utils.AddSecretsWatch(controller, secretName, common.OperatorNamespace()); err != nil { return fmt.Errorf("log-collector-controller failed to watch the Secret resource(%s): %v", secretName, err) } } } + if opts.MultiTenant { + if err = controller.Watch(&source.Kind{Type: &operatorv1.Tenant{}}, 
&handler.EnqueueRequestForObject{}); err != nil { + return fmt.Errorf("logcollector-controller failed to watch Tenant resource: %w", err) + } + } + for _, configMapName := range []string{render.FluentdFilterConfigMapName, relasticsearch.ClusterConfigConfigMapName} { - if err = utils.AddConfigMapWatch(c, configMapName, common.OperatorNamespace(), &handler.EnqueueRequestForObject{}); err != nil { + if err = utils.AddConfigMapWatch(controller, configMapName, common.OperatorNamespace(), &handler.EnqueueRequestForObject{}); err != nil { return fmt.Errorf("logcollector-controller failed to watch ConfigMap %s: %v", configMapName, err) } } - err = c.Watch(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}) + err = controller.Watch(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}) if err != nil { return fmt.Errorf("logcollector-controller failed to watch the node resource: %w", err) } // Watch for changes to TigeraStatus. - if err = utils.AddTigeraStatusWatch(c, ResourceName); err != nil { + if err = utils.AddTigeraStatusWatch(controller, ResourceName); err != nil { return fmt.Errorf("logcollector-controller failed to watch log-collector Tigerastatus: %w", err) } return nil } +// newReconciler returns a new reconcile.Reconciler +func newReconciler(mgr manager.Manager, opts options.AddOptions, licenseAPIReady *utils.ReadyFlag, tierWatchReady *utils.ReadyFlag) reconcile.Reconciler { + c := &ReconcileLogCollector{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + provider: opts.DetectedProvider, + status: status.New(mgr.GetClient(), "log-collector", opts.KubernetesVersion), + clusterDomain: opts.ClusterDomain, + licenseAPIReady: licenseAPIReady, + tierWatchReady: tierWatchReady, + usePSP: opts.UsePSP, + multiTenant: opts.MultiTenant, + } + c.status.Run(opts.ShutdownContext) + return c +} + // blank assignment to verify that ReconcileLogCollector implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileLogCollector{} @@ -224,11 +213,15 @@ func GetLogCollector(ctx context.Context, cli client.Client) (*operatorv1.LogCol // fillDefaults sets the default value of CollectProcessPath, syslog LogTypes, if not set. // This function returns the fields which were set to a default value in the logcollector instance. -func fillDefaults(instance *operatorv1.LogCollector) []string { +func fillDefaults(instance *operatorv1.LogCollector, variant operatorv1.ProductVariant) []string { // Keep track of whether we changed the LogCollector instance during reconcile, so that we know to save it. 
// Keep track of which fields were modified (helpful for error messages) modifiedFields := []string{} + if variant == operatorv1.Calico { + return modifiedFields + } + if instance.Spec.CollectProcessPath == nil { collectProcessPath := v1.CollectProcessPathEnable instance.Spec.CollectProcessPath = &collectProcessPath @@ -269,8 +262,6 @@ func fillDefaults(instance *operatorv1.LogCollector) []string { func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) reqLogger.Info("Reconciling LogCollector") - var license v3.LicenseKey - var err error // Fetch the LogCollector instance instance, err := GetLogCollector(ctx, r.client) if err != nil { @@ -327,6 +318,33 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, nil } + preDefaultPatchFrom := client.MergeFrom(instance.DeepCopy()) + modifiedFields := fillDefaults(instance, variant) + if len(modifiedFields) > 0 { + if err = r.client.Patch(ctx, instance, preDefaultPatchFrom); err != nil { + r.status.SetDegraded(operatorv1.ResourcePatchError, fmt.Sprintf("Failed to set defaults for LogCollector fields: [%s]", + strings.Join(modifiedFields, ", "), + ), err, reqLogger) + return reconcile.Result{}, err + } + } + + pullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving pull secrets", err, reqLogger) + return reconcile.Result{}, err + } + + certificateManager, err := certificatemanager.Create(r.client, installation, r.clusterDomain, common.OperatorNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) + return reconcile.Result{}, err + } + + var license v3.LicenseKey + var esClusterConfig *relasticsearch.ClusterConfig + var managementCluster *operatorv1.ManagementCluster + var prometheusCertificate certificatemanagement.CertificateInterface if variant == operatorv1.TigeraSecureEnterprise { // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. 
if !r.tierWatchReady.IsReady() { @@ -347,17 +365,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } } - preDefaultPatchFrom := client.MergeFrom(instance.DeepCopy()) - modifiedFields := fillDefaults(instance) - if len(modifiedFields) > 0 { - if err = r.client.Patch(ctx, instance, preDefaultPatchFrom); err != nil { - r.status.SetDegraded(operatorv1.ResourcePatchError, fmt.Sprintf("Failed to set defaults for LogCollector fields: [%s]", - strings.Join(modifiedFields, ", "), - ), err, reqLogger) - return reconcile.Result{}, err - } - } - if !r.licenseAPIReady.IsReady() { r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for LicenseKeyAPI to be ready", nil, reqLogger) return reconcile.Result{RequeueAfter: 10 * time.Second}, nil @@ -372,15 +379,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger) return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - } - - pullSecrets, err := utils.GetNetworkingPullSecrets(installation, r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving pull secrets", err, reqLogger) - return reconcile.Result{}, err - } - var esClusterConfig *relasticsearch.ClusterConfig - if variant == operatorv1.TigeraSecureEnterprise { esClusterConfig, err = utils.GetElasticsearchClusterConfig(ctx, r.client) if err != nil { if errors.IsNotFound(err) { @@ -390,6 +388,20 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get the elasticsearch cluster configuration", err, reqLogger) return reconcile.Result{}, err } + managementCluster, err = utils.GetManagementCluster(ctx, r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) + return reconcile.Result{}, err + } + + prometheusCertificate, err = certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get certificate", err, reqLogger) + return reconcile.Result{}, err + } else if prometheusCertificate == nil { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Prometheus secrets are not available yet, waiting until they become available", nil, reqLogger) + return reconcile.Result{RequeueAfter: 5 * time.Second}, nil + } } // Try to grab the ManagementClusterConnection CR because we need it for network policy rendering, @@ -405,21 +417,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } managedCluster := managementClusterConnection != nil - var managementCluster *operatorv1.ManagementCluster - if variant == operatorv1.TigeraSecureEnterprise { - managementCluster, err = utils.GetManagementCluster(ctx, r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) - return reconcile.Result{}, err - } - } - - certificateManager, err := certificatemanager.Create(r.client, installation, r.clusterDomain, common.OperatorNamespace()) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceCreateError, "Unable to create the Tigera CA", err, reqLogger) - return reconcile.Result{}, err - } - // fluentdKeyPair is the key pair fluentd presents to identify itself fluentdKeyPair, err := 
certificateManager.GetOrCreateKeyPair(r.client, render.FluentdPrometheusTLSSecretName, common.OperatorNamespace(), []string{render.FluentdPrometheusTLSSecretName}) if err != nil { @@ -427,18 +424,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, err } - var prometheusCertificate certificatemanagement.CertificateInterface - if variant == operatorv1.TigeraSecureEnterprise { - prometheusCertificate, err = certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Failed to get certificate", err, reqLogger) - return reconcile.Result{}, err - } else if prometheusCertificate == nil { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Prometheus secrets are not available yet, waiting until they become available", nil, reqLogger) - return reconcile.Result{RequeueAfter: 5 * time.Second}, nil - } - } - // Determine whether or not this is a multi-tenant management cluster. multiTenantManagement := r.multiTenant && managementCluster != nil if instance.Spec.MultiTenantManagementClusterNamespace != "" && !multiTenantManagement { @@ -484,7 +469,7 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } trustedBundleCerts := []certificatemanagement.CertificateInterface{linseedCertificate} - if variant == operatorv1.TigeraSecureEnterprise { + if prometheusCertificate != nil { trustedBundleCerts = append(trustedBundleCerts, prometheusCertificate) } // Fluentd needs to mount system certificates in the case where Splunk, Syslog or AWS are used. @@ -496,9 +481,17 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile certificateManager.AddToStatusManager(r.status, render.LogCollectorNamespace) + filters, err := getFluentdFilters(r.client) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving Fluentd filters", err, reqLogger) + return reconcile.Result{}, err + } + var s3Credential *render.S3Credential var splunkCredential *render.SplunkCredential var useSyslogCertificate bool + var eksConfig *render.EksCloudwatchLogConfig + if variant == operatorv1.TigeraSecureEnterprise { exportLogs := utils.IsFeatureActive(license, common.ExportLogsFeature) if !exportLogs && instance.Spec.AdditionalStores != nil { @@ -569,16 +562,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } } } - } - - filters, err := getFluentdFilters(r.client) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving Fluentd filters", err, reqLogger) - return reconcile.Result{}, err - } - - var eksConfig *render.EksCloudwatchLogConfig - if variant == operatorv1.TigeraSecureEnterprise { if installation.KubernetesProvider == operatorv1.ProviderEKS { log.Info("Managed kubernetes EKS found, getting necessary credentials and config") if instance.Spec.AdditionalSources != nil { @@ -595,41 +578,13 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } } } - } else { - // patch and get the felix configuration - _, err = utils.PatchFelixConfiguration(ctx, r.client, func(fc *crdv1.FelixConfiguration) bool { - patchRequired := false - if fc.Spec.FlowLogsFileEnabled == nil || !(*fc.Spec.FlowLogsFileEnabled) { - fc.Spec.FlowLogsFileEnabled = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsFileIncludeService == nil || !(*fc.Spec.FlowLogsFileIncludeService) { - 
fc.Spec.FlowLogsFileIncludeService = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsFileIncludePolicies == nil || !(*fc.Spec.FlowLogsFileIncludePolicies) { - fc.Spec.FlowLogsFileIncludePolicies = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsEnableHostEndpoint == nil || !(*fc.Spec.FlowLogsEnableHostEndpoint) { - fc.Spec.FlowLogsEnableHostEndpoint = ptr.BoolToPtr(true) - patchRequired = true - } + } - if fc.Spec.FlowLogsEnableNetworkSets == nil || !(*fc.Spec.FlowLogsEnableNetworkSets) { - fc.Spec.FlowLogsEnableNetworkSets = ptr.BoolToPtr(true) - patchRequired = true - } - return patchRequired // proceed with this patch - }) - if err != nil { - reqLogger.Error(err, "Error patching felix configuration") - r.status.SetDegraded(operatorv1.ResourcePatchError, "Error patching felix configuration", err, reqLogger) - return reconcile.Result{}, err - } + err = r.patchFelixConfiguration(ctx) + if err != nil { + reqLogger.Error(err, "Error patching felix configuration") + r.status.SetDegraded(operatorv1.ResourcePatchError, "Error patching felix configuration", err, reqLogger) + return reconcile.Result{}, err } // Create a component handler to manage the rendered component. @@ -738,6 +693,43 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, nil } +func (r *ReconcileLogCollector) patchFelixConfiguration(ctx context.Context) error { + _, err := utils.PatchFelixConfiguration(ctx, r.client, func(fc *crdv1.FelixConfiguration) bool { + patchRequired := false + if fc.Spec.FlowLogsFileEnabled == nil || !(*fc.Spec.FlowLogsFileEnabled) { + fc.Spec.FlowLogsFileEnabled = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsFileIncludeService == nil || !(*fc.Spec.FlowLogsFileIncludeService) { + fc.Spec.FlowLogsFileIncludeService = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsFileIncludePolicies == nil || !(*fc.Spec.FlowLogsFileIncludePolicies) { + fc.Spec.FlowLogsFileIncludePolicies = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsEnableHostEndpoint == nil || !(*fc.Spec.FlowLogsEnableHostEndpoint) { + fc.Spec.FlowLogsEnableHostEndpoint = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsEnableNetworkSets == nil || !(*fc.Spec.FlowLogsEnableNetworkSets) { + fc.Spec.FlowLogsEnableNetworkSets = ptr.BoolToPtr(true) + patchRequired = true + } + + if fc.Spec.FlowLogsFileIncludeLabels == nil || !(*fc.Spec.FlowLogsFileIncludeLabels) { + fc.Spec.FlowLogsFileIncludeLabels = ptr.BoolToPtr(true) + patchRequired = true + } + return patchRequired // proceed with this patch + }) + return err +} + func hasWindowsNodes(c client.Client) (bool, error) { nodes := corev1.NodeList{} err := c.List(context.Background(), &nodes, client.MatchingLabels{"kubernetes.io/os": "windows"}) diff --git a/pkg/controller/logcollector/logcollector_controller_test.go b/pkg/controller/logcollector/logcollector_controller_test.go index 1ef8426def..1b5f5f03a8 100644 --- a/pkg/controller/logcollector/logcollector_controller_test.go +++ b/pkg/controller/logcollector/logcollector_controller_test.go @@ -49,7 +49,7 @@ import ( "github.com/tigera/operator/test" ) -var _ = Describe("LogCollector controller tests", func() { +var _ = Describe("LogCollector controller tests(Calico Enterprise)", func() { var c client.Client var ctx context.Context var r ReconcileLogCollector @@ -764,7 +764,7 @@ var _ = Describe("LogCollector controller tests", func() { logCollector := 
operatorv1.LogCollector{Spec: operatorv1.LogCollectorSpec{AdditionalStores: &operatorv1.AdditionalLogStoreSpec{ Syslog: &operatorv1.SyslogStoreSpec{}, }}} - modifiedFields := fillDefaults(&logCollector) + modifiedFields := fillDefaults(&logCollector, operatorv1.TigeraSecureEnterprise) expectedFields := []string{"CollectProcessPath", "AdditionalStores.Syslog.LogTypes", "AdditionalStores.Syslog.Encryption"} expectedLogTypes := []operatorv1.SyslogLogType{ operatorv1.SyslogLogAudit, @@ -786,7 +786,7 @@ var _ = Describe("LogCollector controller tests", func() { logCollector.Spec.CollectProcessPath = &processPath logCollector.Spec.AdditionalStores.Syslog.LogTypes = []operatorv1.SyslogLogType{operatorv1.SyslogLogAudit} logCollector.Spec.AdditionalStores.Syslog.Encryption = operatorv1.EncryptionNone - modifiedFields := fillDefaults(&logCollector) + modifiedFields := fillDefaults(&logCollector, operatorv1.TigeraSecureEnterprise) Expect(*logCollector.Spec.CollectProcessPath).To(Equal(operatorv1.CollectProcessPathDisable)) expectedLogTypes := []operatorv1.SyslogLogType{ operatorv1.SyslogLogAudit, @@ -797,7 +797,7 @@ var _ = Describe("LogCollector controller tests", func() { }) }) -var _ = Describe("LogCollector controller tests (OSS)", func() { +var _ = Describe("LogCollector controller tests (Calico)", func() { var c client.Client var ctx context.Context var r ReconcileLogCollector @@ -925,7 +925,14 @@ var _ = Describe("LogCollector controller tests (OSS)", func() { }) }) - Context("should enabled flow logs felix configs", func() { + Context("should test fillDefaults", func() { + It("should not modify any fields", func() { + modifiedFields := fillDefaults(&operatorv1.LogCollector{}, operatorv1.Calico) + Expect(len(modifiedFields)).To(Equal(0)) + }) + }) + + Context("should enable flow logs felix configs", func() { It("should set the proper flow logs felix configs", func() { _, err := r.Reconcile(ctx, reconcile.Request{}) Expect(err).ShouldNot(HaveOccurred()) @@ -936,6 +943,7 @@ var _ = Describe("LogCollector controller tests (OSS)", func() { Expect(*fc.Spec.FlowLogsFileIncludeService).Should(BeTrue()) Expect(*fc.Spec.FlowLogsEnableHostEndpoint).Should(BeTrue()) Expect(*fc.Spec.FlowLogsEnableNetworkSets).Should(BeTrue()) + Expect(*fc.Spec.FlowLogsFileIncludeLabels).Should(BeTrue()) }) }) diff --git a/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go b/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go index 7c851cee2b..a5dfa425cf 100644 --- a/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go +++ b/pkg/controller/logstorage/managedcluster/managed_cluster_controller_test.go @@ -39,7 +39,6 @@ import ( "github.com/tigera/operator/pkg/controller/utils" "github.com/tigera/operator/pkg/dns" "github.com/tigera/operator/pkg/render" - "github.com/tigera/operator/pkg/render/logstorage/esgateway" ) func NewReconcilerWithShims( @@ -63,7 +62,7 @@ func NewReconcilerWithShims( return r, nil } -var _ = Describe("LogStorageManagedCluster controller", func() { +var _ = Describe("LogStorageManagedCluster controller (Calico Enterprise)", func() { var ( cli client.Client scheme *runtime.Scheme @@ -114,7 +113,7 @@ var _ = Describe("LogStorageManagedCluster controller", func() { Expect(err).ShouldNot(HaveOccurred()) svc := &corev1.Service{} Expect( - cli.Get(ctx, client.ObjectKey{Name: esgateway.ServiceName, Namespace: render.ElasticsearchNamespace}, svc), + cli.Get(ctx, client.ObjectKey{Name: render.LinseedServiceName, Namespace: 
render.ElasticsearchNamespace}, svc),
 		).ShouldNot(HaveOccurred())
 		Expect(svc.Spec.ExternalName).Should(Equal(expectedSvcName))
@@ -146,7 +145,7 @@ var _ = Describe("LogStorageManagedCluster controller", func() {
 
 })
 
-var _ = Describe("LogStorageManagedCluster controller OSS", func() {
+var _ = Describe("LogStorageManagedCluster controller (Calico)", func() {
 	var (
 		cli    client.Client
 		scheme *runtime.Scheme
@@ -172,7 +171,7 @@ var _ = Describe("LogStorageManagedCluster controller OSS", func() {
 				Name: "default",
 			},
 			Status: operatorv1.InstallationStatus{
-				Variant:  operatorv1.TigeraSecureEnterprise,
+				Variant:  operatorv1.Calico,
 				Computed: &operatorv1.InstallationSpec{},
 			},
 			Spec: operatorv1.InstallationSpec{
diff --git a/pkg/render/fluentd.go b/pkg/render/fluentd.go
index ede6b06df1..6ccfb7b1d3 100644
--- a/pkg/render/fluentd.go
+++ b/pkg/render/fluentd.go
@@ -128,12 +128,11 @@ type SplunkCredential struct {
 }
 
 func Fluentd(cfg *FluentdConfiguration) Component {
-	fluentd := &fluentdComponent{
+	return &fluentdComponent{
 		cfg:          cfg,
 		probeTimeout: 10,
 		probePeriod:  60,
 	}
-	return fluentd
 }
 
 type EksCloudwatchLogConfig struct {
@@ -183,7 +182,10 @@ func (c *fluentdComponent) ResolveImages(is *operatorv1.ImageSet) error {
 	path := c.cfg.Installation.ImagePath
 	prefix := c.cfg.Installation.ImagePrefix
 
-	if c.cfg.OSType == rmeta.OSTypeWindows && c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise {
+	if c.cfg.OSType == rmeta.OSTypeWindows {
+		if c.cfg.Installation.Variant == operatorv1.Calico {
+			return fmt.Errorf("Calico does not support Fluentd on Windows")
+		}
 		var err error
 		c.image, err = components.GetReference(components.ComponentTigeraFluentdWindows, reg, path, prefix, is)
 		return err
@@ -261,7 +263,10 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) {
 	objs = append(objs, secret.ToRuntimeObjects(secret.CopyToNamespace(LogCollectorNamespace, c.cfg.PullSecrets...)...)...)
if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise {
 		objs = append(objs, c.allowTigeraPolicy())
+	} else {
+		toDelete = append(toDelete, c.allowTigeraPolicy())
 	}
+
 	objs = append(objs, c.metricsService())
 
 	if c.cfg.Installation.KubernetesProvider == operatorv1.ProviderGKE {
@@ -280,7 +285,7 @@ func (c *fluentdComponent) Objects() ([]client.Object, []client.Object) {
 	if c.cfg.Filters != nil {
 		objs = append(objs, c.filtersConfigMap())
 	}
-	if c.cfg.EKSConfig != nil && c.cfg.OSType == rmeta.OSTypeLinux && c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise {
+	if c.cfg.EKSConfig != nil && c.cfg.OSType == rmeta.OSTypeLinux {
 		if c.cfg.UsePSP {
 			objs = append(objs,
 				c.eksLogForwarderClusterRole(),
@@ -626,9 +631,9 @@ func (c *fluentdComponent) envvars() []corev1.EnvVar {
 		{Name: "LINSEED_ENABLED", Value: "true"},
 		{Name: "LINSEED_ENDPOINT", Value: relasticsearch.LinseedEndpoint(c.SupportedOSType(), c.cfg.ClusterDomain, linseedNS)},
 		{Name: "LINSEED_CA_PATH", Value: c.trustedBundlePath()},
-		{Name: "FLUENT_UID", Value: "0"},
 		{Name: "TLS_KEY_PATH", Value: c.keyPath()},
 		{Name: "TLS_CRT_PATH", Value: c.certPath()},
+		{Name: "FLUENT_UID", Value: "0"},
 		{Name: "FLOW_LOG_FILE", Value: c.path("/var/log/calico/flowlogs/flows.log")},
 		{Name: "FLUENTD_ES_SECURE", Value: "true"},
 		{Name: "NODENAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}}},
diff --git a/pkg/render/fluentd_test.go b/pkg/render/fluentd_test.go
index dca7746ec9..7fec588531 100644
--- a/pkg/render/fluentd_test.go
+++ b/pkg/render/fluentd_test.go
@@ -22,9 +22,12 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
 	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
@@ -104,24 +107,18 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() {
 		}))
 	})
 
-	It("should render with a default configuration", func() {
-		expectedResources := []struct {
-			name    string
-			ns      string
-			group   string
-			version string
-			kind    string
-		}{
-			{name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"},
-			{name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"},
-			{name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"},
-			{name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"},
-			{name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"},
-			{name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"},
-			{name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"},
-			{name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"},
-			{name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"},
-			{name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"},
+	It("should render with a default configuration for Calico Enterprise", func() {
+		expectedResources := []client.Object{
+			
&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } // Should render the correct resources. @@ -129,11 +126,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) // Check the namespace. 
ns := rtest.GetResource(resources, "tigera-fluentd", "", "", "v1", "Namespace").(*corev1.Namespace) @@ -222,21 +215,15 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { Expect(ms.Spec.ClusterIP).To(Equal("None"), "metrics service should be headless to prevent kube-proxy from rendering too many iptables rules") }) - It("should render with a default configuration for calico OSS", func() { - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + It("should render with a default configuration for Calico", func() { + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } // Should render the correct resources. @@ -245,11 +232,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) // Check the namespace. 
ns := rtest.GetResource(resources, "tigera-fluentd", "", "", "v1", "Namespace").(*corev1.Namespace) @@ -326,22 +309,16 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { }) It("should render for Windows nodes", func() { - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "tigera-fluentd-windows", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd-windows", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "fluentd-node-windows", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node-windows", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd-windows"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd-windows"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node-windows", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node-windows", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } cfg.OSType = rmeta.OSTypeWindows @@ -350,11 +327,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, 
expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node-windows", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Volumes[0].VolumeSource.HostPath.Path).To(Equal("c:/TigeraCalico")) @@ -433,24 +406,18 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { }, } - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "log-collector-s3-credentials", ns: "tigera-fluentd", group: "", version: "v1", kind: "Secret"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "log-collector-s3-credentials", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: 
render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } // Should render the correct resources. @@ -458,11 +425,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -500,23 +463,17 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { } }) It("should render with Syslog configuration", func() { - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: 
metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } var ps int32 = 180 @@ -535,12 +492,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - // Should render the correct resources. - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -681,25 +633,19 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { }, } - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "logcollector-splunk-credentials", ns: "tigera-fluentd", group: "", version: "v1", kind: "Secret"}, - {name: "logcollector-splunk-public-certificate", ns: "tigera-fluentd", group: "", version: "v1", kind: "Secret"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: 
"Service", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "logcollector-splunk-credentials", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "logcollector-splunk-public-certificate", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } // Should render the correct resources. 
@@ -707,11 +653,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -768,24 +710,18 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { }, } - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "logcollector-splunk-credentials", ns: "tigera-fluentd", group: "", version: "v1", kind: "Secret"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "logcollector-splunk-credentials", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + 
&rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } // Should render the correct resources. @@ -793,12 +729,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } - + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) envs := ds.Spec.Template.Spec.Containers[0].Env @@ -842,36 +773,25 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { Flow: "flow-filter", } - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "fluentd-filters", ns: "tigera-fluentd", group: "", version: "v1", kind: "ConfigMap"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-filters", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"}}, + 
&rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } - // Should render the correct resources. component := render.Fluentd(cfg) resources, _ := component.Objects() Expect(len(resources)).To(Equal(len(expectedResources))) - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) ds := rtest.GetResource(resources, "fluentd-node", "tigera-fluentd", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) Expect(ds.Spec.Template.Spec.Containers).To(HaveLen(1)) @@ -882,30 +802,23 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { }) It("should render with EKS Cloudwatch Log", func() { - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {name: "tigera-fluentd", ns: "", group: "", version: "v1", kind: "Namespace"}, - {name: render.FluentdPolicyName, ns: render.LogCollectorNamespace, group: "projectcalico.org", version: "v3", kind: "NetworkPolicy"}, - {name: render.FluentdMetricsService, ns: render.LogCollectorNamespace, group: "", version: "v1", kind: "Service"}, - {name: "eks-log-forwarder", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "eks-log-forwarder", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "eks-log-forwarder", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "eks-log-forwarder", ns: "tigera-fluentd", group: "", version: "v1", kind: "ServiceAccount"}, - {name: "tigera-eks-log-forwarder-secret", ns: "tigera-fluentd", group: "", version: "v1", kind: "Secret"}, - {name: "eks-log-forwarder", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "Deployment"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRole"}, - {name: "tigera-fluentd", ns: "", group: "rbac.authorization.k8s.io", version: "v1", kind: "ClusterRoleBinding"}, - {name: "tigera-fluentd", ns: "", group: "policy", version: "v1beta1", kind: "PodSecurityPolicy"}, - {name: "fluentd-node", ns: "tigera-fluentd", group: "", version: "v1", kind: 
"ServiceAccount"}, - {name: render.PacketCaptureAPIRole, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "Role"}, - {name: render.PacketCaptureAPIRoleBinding, ns: render.LogCollectorNamespace, group: "rbac.authorization.k8s.io", version: "v1", kind: "RoleBinding"}, - // Daemonset - {name: "fluentd-node", ns: "tigera-fluentd", group: "apps", version: "v1", kind: "DaemonSet"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &v3.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdPolicyName, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "NetworkPolicy", APIVersion: "projectcalico.org/v3"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: render.FluentdMetricsService, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "eks-log-forwarder"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "eks-log-forwarder"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "eks-log-forwarder"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "eks-log-forwarder", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-eks-log-forwarder-secret", Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "eks-log-forwarder", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRole, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: render.PacketCaptureAPIRoleBinding, Namespace: render.LogCollectorNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-node", Namespace: "tigera-fluentd"}, TypeMeta: metav1.TypeMeta{Kind: "DaemonSet", APIVersion: "apps/v1"}}, } fetchInterval := int32(900) @@ -931,11 +844,7 @@ var _ = Describe("Tigera Secure Fluentd rendering tests", func() { 
Expect(len(resources)).To(Equal(len(expectedResources))) // Should render the correct resources. - i := 0 - for _, expectedRes := range expectedResources { - rtest.CompareResource(resources[i], expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) - i++ - } + rtest.ExpectResources(resources, expectedResources) deploy := rtest.GetResource(resources, "eks-log-forwarder", "tigera-fluentd", "apps", "v1", "Deployment").(*appsv1.Deployment) Expect(deploy.Spec.Template.Spec.InitContainers).To(HaveLen(1)) diff --git a/pkg/render/guardian_test.go b/pkg/render/guardian_test.go index cf551debfa..1369cdcb9d 100644 --- a/pkg/render/guardian_test.go +++ b/pkg/render/guardian_test.go @@ -84,7 +84,7 @@ var _ = Describe("Guardian Rendering tests", func() { } } - Context("Guardian component OSS", func() { + Context("Guardian component (Calico)", func() { renderGuardian := func(i operatorv1.InstallationSpec) { cfg = createGuardianConfig(i, "127.0.0.1:1234", false) g = render.Guardian(cfg) @@ -96,7 +96,7 @@ var _ = Describe("Guardian Rendering tests", func() { renderGuardian(operatorv1.InstallationSpec{Registry: "my-reg/", Variant: operatorv1.Calico}) }) - It("should render all resources for a managed OSS cluster", func() { + It("should render all resources for a managed Calico cluster", func() { expectedResources := []struct { name string ns string @@ -168,7 +168,8 @@ var _ = Describe("Guardian Rendering tests", func() { Expect(deployment.Spec.Template.Spec.Tolerations).Should(ContainElements(append(rmeta.TolerateCriticalAddonsAndControlPlane, t))) }) }) - Context("Guardian component", func() { + + Context("Guardian component (Calico Enterprise)", func() { renderGuardian := func(i operatorv1.InstallationSpec) { cfg = createGuardianConfig(i, "127.0.0.1:1234", false) g = render.Guardian(cfg) diff --git a/pkg/render/logstorage.go b/pkg/render/logstorage.go index 5d6a61da0f..52fad79caa 100644 --- a/pkg/render/logstorage.go +++ b/pkg/render/logstorage.go @@ -2066,9 +2066,6 @@ func (m *managedClusterLogStorage) Objects() (objsToCreate []client.Object, objs role, binding, ) - if m.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { - toCreate = append(toCreate, m.elasticsearchExternalService()) - } return toCreate, nil } @@ -2094,20 +2091,6 @@ func (m *managedClusterLogStorage) linseedExternalService() *corev1.Service { } } -func (m *managedClusterLogStorage) elasticsearchExternalService() *corev1.Service { - return &corev1.Service{ - TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}, - ObjectMeta: metav1.ObjectMeta{ - Name: ESGatewayServiceName, - Namespace: ElasticsearchNamespace, - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeExternalName, - ExternalName: fmt.Sprintf("%s.%s.svc.%s", GuardianServiceName, GuardianNamespace, m.cfg.ClusterDomain), - }, - } -} - // In managed clusters, we need to provision a role and binding for linseed to provide permissions // to create configmaps. 
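With the es-gateway alias removed, the only ExternalName Service rendered into a managed cluster is the Linseed one shown above, which resolves the in-cluster name to Guardian's tunnel endpoint. A condensed sketch of that pattern; the function and parameter names are illustrative, not the operator's API:

    package render

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // externalNameService aliases a local service name to another in-cluster
    // service via DNS, so callers keep using the local name while traffic
    // reaches Guardian, which proxies it to the management cluster.
    func externalNameService(name, namespace, targetService, targetNamespace, clusterDomain string) *corev1.Service {
    	return &corev1.Service{
    		TypeMeta:   metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
    		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
    		Spec: corev1.ServiceSpec{
    			Type:         corev1.ServiceTypeExternalName,
    			ExternalName: fmt.Sprintf("%s.%s.svc.%s", targetService, targetNamespace, clusterDomain),
    		},
    	}
    }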
func (m managedClusterLogStorage) linseedExternalRoleAndBinding() (*rbacv1.ClusterRole, *rbacv1.RoleBinding) { diff --git a/pkg/render/logstorage_test.go b/pkg/render/logstorage_test.go index c8aaa8fddb..4c6e9c2332 100644 --- a/pkg/render/logstorage_test.go +++ b/pkg/render/logstorage_test.go @@ -964,7 +964,7 @@ var _ = Describe("Elasticsearch rendering tests", func() { }) }) - Context("Managed cluster OSS", func() { + Context("Managed cluster (Calico)", func() { var cfg *render.ManagedClusterLogStorageConfiguration BeforeEach(func() { @@ -1001,7 +1001,7 @@ var _ = Describe("Elasticsearch rendering tests", func() { }) }) - Context("Managed cluster", func() { + Context("Managed cluster (Calico Enterprise)", func() { var cfg *render.ManagedClusterLogStorageConfiguration var managementClusterConnection *operatorv1.ManagementClusterConnection @@ -1031,11 +1031,6 @@ var _ = Describe("Elasticsearch rendering tests", func() { }}, {"tigera-linseed", "", &rbacv1.ClusterRole{}, nil}, {"tigera-linseed", "tigera-fluentd", &rbacv1.RoleBinding{}, nil}, - {render.ESGatewayServiceName, render.ElasticsearchNamespace, &corev1.Service{}, func(resource runtime.Object) { - svc := resource.(*corev1.Service) - Expect(svc.Spec.Type).Should(Equal(corev1.ServiceTypeExternalName)) - Expect(svc.Spec.ExternalName).Should(Equal(fmt.Sprintf("%s.%s.svc.%s", render.GuardianServiceName, render.GuardianNamespace, dns.DefaultClusterDomain))) - }}, } component := render.NewManagedClusterLogStorage(cfg) createResources, deleteResources := component.Objects() diff --git a/pkg/render/node.go b/pkg/render/node.go index ab8d3d1736..e9065bdac8 100644 --- a/pkg/render/node.go +++ b/pkg/render/node.go @@ -510,22 +510,10 @@ func (c *nodeComponent) nodeRole() *rbacv1.ClusterRole { "externalnetworks", "licensekeys", "remoteclusterconfigurations", - "stagedglobalnetworkpolicies", - "stagedkubernetesnetworkpolicies", - "stagednetworkpolicies", - "tiers", "packetcaptures", }, Verbs: []string{"get", "list", "watch"}, }, - { - // Tigera Secure creates some tiers on startup. - APIGroups: []string{"crd.projectcalico.org"}, - Resources: []string{ - "tiers", - }, - Verbs: []string{"create"}, - }, { // Tigera Secure updates status for packet captures. APIGroups: []string{"crd.projectcalico.org"}, @@ -1304,7 +1292,7 @@ func (c *nodeComponent) nodeVolumeMounts() []corev1.VolumeMount { nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/var/log/calico", Name: "var-log-calico"}) - if c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { + if c.cfg.Installation.Variant != operatorv1.TigeraSecureEnterprise && c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { cniLogMount := corev1.VolumeMount{MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false} nodeVolumeMounts = append(nodeVolumeMounts, cniLogMount) } diff --git a/pkg/render/typha.go b/pkg/render/typha.go index fcb97ea635..38016aec71 100644 --- a/pkg/render/typha.go +++ b/pkg/render/typha.go @@ -326,10 +326,6 @@ func (c *typhaComponent) typhaRole() *rbacv1.ClusterRole { Resources: []string{ "licensekeys", "remoteclusterconfigurations", - "stagedglobalnetworkpolicies", - "stagedkubernetesnetworkpolicies", - "stagednetworkpolicies", - "tiers", "packetcaptures", "deeppacketinspections", "externalnetworks", @@ -337,14 +333,6 @@ func (c *typhaComponent) typhaRole() *rbacv1.ClusterRole { }, Verbs: []string{"get", "list", "watch"}, }, - { - // Tigera Secure creates some tiers on startup. 
- APIGroups: []string{"crd.projectcalico.org"}, - Resources: []string{ - "tiers", - }, - Verbs: []string{"create"}, - }, } role.Rules = append(role.Rules, extraRules...) } From 031643efc0f8fe5c814c00c0b626d016c8308dc3 Mon Sep 17 00:00:00 2001 From: sridhar Date: Thu, 14 Sep 2023 13:28:16 -0700 Subject: [PATCH 3/3] Add support for the Monitor resource --- .../crd.projectcalico.org/v1/felixconfig.go | 18 -- .../v1/zz_generated.deepcopy.go | 30 --- .../installation/core_controller.go | 96 +++++---- .../logcollector/logcollector_controller.go | 46 ---- .../logcollector_controller_test.go | 15 -- pkg/controller/monitor/monitor_controller.go | 197 +++++++++--------- .../monitor/monitor_controller_test.go | 112 +++++++++- pkg/controller/utils/discovery.go | 2 - pkg/controller/utils/utils.go | 12 ++ pkg/render/monitor/monitor.go | 63 ++++-- pkg/render/monitor/monitor_test.go | 184 +++++++++------- pkg/render/node.go | 47 ++++- pkg/render/node_test.go | 136 ++++++++++++ 13 files changed, 614 insertions(+), 344 deletions(-) diff --git a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go index e244501fe3..1bb04d0993 100644 --- a/pkg/apis/crd.projectcalico.org/v1/felixconfig.go +++ b/pkg/apis/crd.projectcalico.org/v1/felixconfig.go @@ -387,24 +387,6 @@ type FelixConfigurationSpec struct { // `[fd00:83a6::12]:5353`.Note that Felix (calico-node) will need RBAC permission to read the details of // each service specified by a `k8s-service:...` form. [Default: "k8s-service:kube-dns"]. DNSTrustedServers *[]string `json:"dnsTrustedServers,omitempty"` - - // FlowLogsFileEnabled controls whether flow logs is enabled. [Default: false] - FlowLogsFileEnabled *bool `json:"flowLogsFileEnabled,omitempty"` - - // FlowLogsFileIncludeService controls whether service information is enabled in the flow logs. [Default: false] - FlowLogsFileIncludeService *bool `json:"flowLogsFileIncludeService,omitempty"` - - // FlowLogsFileIncludePolicies controls whether policy information is enabled in the flow logs. [Default: false] - FlowLogsFileIncludePolicies *bool `json:"flowLogsFileIncludePolicies,omitempty"` - - // FlowLogsEnableHostEndpoint enables flow logs reporting for host endpoints. [Default: false] - FlowLogsEnableHostEndpoint *bool `json:"flowLogsEnableHostEndpoint,omitempty"` - - // FlowLogsEnableNetworkSets enables flow logs reporting for global network sets. [Default: false] - FlowLogsEnableNetworkSets *bool `json:"flowLogsEnableNetworkSets,omitempty"` - - // FlowLogsFileIncludeLabels controls whether labels are reported in the flow logs.
[Default: false] - FlowLogsFileIncludeLabels *bool `json:"flowLogsFileIncludeLabels,omitempty"` } type RouteTableRange struct { diff --git a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go index 07b5fc344e..335ac34d4c 100644 --- a/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go +++ b/pkg/apis/crd.projectcalico.org/v1/zz_generated.deepcopy.go @@ -698,36 +698,6 @@ func (in *FelixConfigurationSpec) DeepCopyInto(out *FelixConfigurationSpec) { copy(*out, *in) } } - if in.FlowLogsFileEnabled != nil { - in, out := &in.FlowLogsFileEnabled, &out.FlowLogsFileEnabled - *out = new(bool) - **out = **in - } - if in.FlowLogsFileIncludeService != nil { - in, out := &in.FlowLogsFileIncludeService, &out.FlowLogsFileIncludeService - *out = new(bool) - **out = **in - } - if in.FlowLogsFileIncludePolicies != nil { - in, out := &in.FlowLogsFileIncludePolicies, &out.FlowLogsFileIncludePolicies - *out = new(bool) - **out = **in - } - if in.FlowLogsEnableHostEndpoint != nil { - in, out := &in.FlowLogsEnableHostEndpoint, &out.FlowLogsEnableHostEndpoint - *out = new(bool) - **out = **in - } - if in.FlowLogsEnableNetworkSets != nil { - in, out := &in.FlowLogsEnableNetworkSets, &out.FlowLogsEnableNetworkSets - *out = new(bool) - **out = **in - } - if in.FlowLogsFileIncludeLabels != nil { - in, out := &in.FlowLogsFileIncludeLabels, &out.FlowLogsFileIncludeLabels - *out = new(bool) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfigurationSpec. diff --git a/pkg/controller/installation/core_controller.go b/pkg/controller/installation/core_controller.go index 3ee71a3ad2..78ab3acd22 100644 --- a/pkg/controller/installation/core_controller.go +++ b/pkg/controller/installation/core_controller.go @@ -309,6 +309,18 @@ func add(c controller.Controller, r *ReconcileInstallation) error { return fmt.Errorf("tigera-installation-controller failed to watch BGPConfiguration resource: %w", err) } + // watch for change to primary resource LogCollector + err = c.Watch(&source.Kind{Type: &operator.LogCollector{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("tigera-installation-controller failed to watch primary resource: %v", err) + } + + // watch for change to primary resource monitor + err = c.Watch(&source.Kind{Type: &operator.Monitor{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("tigera-installation-controller failed to watch primary resource: %v", err) + } + if r.enterpriseCRDsExist { // Watch for changes to primary resource ManagementCluster err = c.Watch(&source.Kind{Type: &operator.ManagementCluster{}}, &handler.EnqueueRequestForObject{}) @@ -326,12 +338,6 @@ func add(c controller.Controller, r *ReconcileInstallation) error { return fmt.Errorf("tigera-installation-controller failed to watch secret '%s' in '%s' namespace: %w", monitor.PrometheusTLSSecretName, common.OperatorNamespace(), err) } - // watch for change to primary resource LogCollector - err = c.Watch(&source.Kind{Type: &operator.LogCollector{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return fmt.Errorf("tigera-installation-controller failed to watch primary resource: %v", err) - } - if r.manageCRDs { if err = addCRDWatches(c, operator.TigeraSecureEnterprise); err != nil { return fmt.Errorf("tigera-installation-controller failed to watch CRD resource: %v", err) @@ -997,19 +1003,26 @@ func (r *ReconcileInstallation) Reconcile(ctx 
context.Context, request reconcile return reconcile.Result{}, err } + logCollector, err := utils.GetLogCollector(ctx, r.client) + if logCollector != nil { + if err != nil { + r.status.SetDegraded(operator.ResourceReadError, "Error reading LogCollector", err, reqLogger) + return reconcile.Result{}, err + } + } + + monitorResource, err := utils.GetMonitor(ctx, r.client) + if monitorResource != nil { + if err != nil { + r.status.SetDegraded(operator.ResourceReadError, "Error reading Monitor Resource", err, reqLogger) + return reconcile.Result{}, err + } + } + var managementCluster *operator.ManagementCluster var managementClusterConnection *operator.ManagementClusterConnection - var logCollector *operator.LogCollector includeV3NetworkPolicy := false if r.enterpriseCRDsExist { - logCollector, err = utils.GetLogCollector(ctx, r.client) - if logCollector != nil { - if err != nil { - r.status.SetDegraded(operator.ResourceReadError, "Error reading LogCollector", err, reqLogger) - return reconcile.Result{}, err - } - } - managementCluster, err = utils.GetManagementCluster(ctx, r.client) if err != nil { r.status.SetDegraded(operator.ResourceReadError, "Error reading ManagementCluster", err, reqLogger) @@ -1141,35 +1154,35 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile nodeReporterMetricsPort := defaultNodeReporterPort var nodePrometheusTLS certificatemanagement.KeyPairInterface calicoVersion := components.CalicoRelease - if instance.Spec.Variant == operator.TigeraSecureEnterprise { - // Determine the port to use for nodeReporter metrics. - if felixConfiguration.Spec.PrometheusReporterPort != nil { - nodeReporterMetricsPort = *felixConfiguration.Spec.PrometheusReporterPort - } + // Determine the port to use for nodeReporter metrics. 
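Both lookups rely on helpers that treat NotFound as absence; see the GetMonitor helper added to pkg/controller/utils/utils.go later in this patch. Assuming GetLogCollector follows the same contract, a non-nil object implies a nil error, so the error check nested inside the non-nil guard can never fire; checking the error first is the safer ordering. A sketch with a hypothetical helper name:

    package utils

    import (
    	"context"

    	operatorv1 "github.com/tigera/operator/api/v1"
    	kerrors "k8s.io/apimachinery/pkg/api/errors"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // getOptionalMonitor is a sketch of the optional-CR pattern: NotFound means
    // the CR simply is not installed, which callers treat as a nil result.
    func getOptionalMonitor(ctx context.Context, cli client.Client, key client.ObjectKey) (*operatorv1.Monitor, error) {
    	m := &operatorv1.Monitor{}
    	if err := cli.Get(ctx, key, m); err != nil {
    		if kerrors.IsNotFound(err) {
    			return nil, nil
    		}
    		return nil, err // a real read error: surface it before touching m
    	}
    	return m, nil
    }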
+ if felixConfiguration.Spec.PrometheusReporterPort != nil { + nodeReporterMetricsPort = *felixConfiguration.Spec.PrometheusReporterPort + } - if nodeReporterMetricsPort == 0 { - err := errors.New("felixConfiguration prometheusReporterPort=0 not supported") - r.status.SetDegraded(operator.InvalidConfigurationError, "invalid metrics port", err, reqLogger) - return reconcile.Result{}, err - } + if nodeReporterMetricsPort == 0 { + err := errors.New("felixConfiguration prometheusReporterPort=0 not supported") + r.status.SetDegraded(operator.InvalidConfigurationError, "invalid metrics port", err, reqLogger) + return reconcile.Result{}, err + } - nodePrometheusTLS, err = certificateManager.GetOrCreateKeyPair(r.client, render.NodePrometheusTLSServerSecret, common.OperatorNamespace(), dns.GetServiceDNSNames(render.CalicoNodeMetricsService, common.CalicoNamespace, r.clusterDomain)) - if err != nil { - r.status.SetDegraded(operator.ResourceCreateError, "Error creating TLS certificate", err, reqLogger) - return reconcile.Result{}, err - } - if nodePrometheusTLS != nil { - typhaNodeTLS.TrustedBundle.AddCertificates(nodePrometheusTLS) - } - prometheusClientCert, err := certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) - if err != nil { - r.status.SetDegraded(operator.CertificateError, "Unable to fetch prometheus certificate", err, reqLogger) - return reconcile.Result{}, err - } - if prometheusClientCert != nil { - typhaNodeTLS.TrustedBundle.AddCertificates(prometheusClientCert) - } + nodePrometheusTLS, err = certificateManager.GetOrCreateKeyPair(r.client, render.NodePrometheusTLSServerSecret, common.OperatorNamespace(), dns.GetServiceDNSNames(render.CalicoNodeMetricsService, common.CalicoNamespace, r.clusterDomain)) + if err != nil { + r.status.SetDegraded(operator.ResourceCreateError, "Error creating TLS certificate", err, reqLogger) + return reconcile.Result{}, err + } + if nodePrometheusTLS != nil { + typhaNodeTLS.TrustedBundle.AddCertificates(nodePrometheusTLS) + } + prometheusClientCert, err := certificateManager.GetCertificate(r.client, monitor.PrometheusClientTLSSecretName, common.OperatorNamespace()) + if err != nil { + r.status.SetDegraded(operator.CertificateError, "Unable to fetch prometheus certificate", err, reqLogger) + return reconcile.Result{}, err + } + if prometheusClientCert != nil { + typhaNodeTLS.TrustedBundle.AddCertificates(prometheusClientCert) + } + if instance.Spec.Variant == operator.TigeraSecureEnterprise { // es-kube-controllers needs to trust the ESGW certificate. We'll fetch it here and add it to the trusted bundle. // Note that although we're adding this to the typhaNodeTLS trusted bundle, it will be used by es-kube-controllers. 
This is because @@ -1328,6 +1341,7 @@ func (r *ReconcileInstallation) Reconcile(ctx context.Context, request reconcile Installation: &instance.Spec, AmazonCloudIntegration: aci, LogCollector: logCollector, + MonitorResource: monitorResource, BirdTemplates: birdTemplates, TLS: typhaNodeTLS, ClusterDomain: r.clusterDomain, diff --git a/pkg/controller/logcollector/logcollector_controller.go b/pkg/controller/logcollector/logcollector_controller.go index a3966827f8..be5a39f35b 100644 --- a/pkg/controller/logcollector/logcollector_controller.go +++ b/pkg/controller/logcollector/logcollector_controller.go @@ -39,14 +39,12 @@ import ( operatorv1 "github.com/tigera/operator/api/v1" v1 "github.com/tigera/operator/api/v1" - crdv1 "github.com/tigera/operator/pkg/apis/crd.projectcalico.org/v1" "github.com/tigera/operator/pkg/common" "github.com/tigera/operator/pkg/controller/certificatemanager" "github.com/tigera/operator/pkg/controller/options" "github.com/tigera/operator/pkg/controller/status" "github.com/tigera/operator/pkg/controller/utils" "github.com/tigera/operator/pkg/controller/utils/imageset" - "github.com/tigera/operator/pkg/ptr" "github.com/tigera/operator/pkg/render" rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" relasticsearch "github.com/tigera/operator/pkg/render/common/elasticsearch" @@ -580,13 +578,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile } } - err = r.patchFelixConfiguration(ctx) - if err != nil { - reqLogger.Error(err, "Error patching felix configuration") - r.status.SetDegraded(operatorv1.ResourcePatchError, "Error patching felix configuration", err, reqLogger) - return reconcile.Result{}, err - } - // Create a component handler to manage the rendered component. 
handler := utils.NewComponentHandler(log, r.client, r.scheme, instance) @@ -693,43 +684,6 @@ func (r *ReconcileLogCollector) Reconcile(ctx context.Context, request reconcile return reconcile.Result{}, nil } -func (r *ReconcileLogCollector) patchFelixConfiguration(ctx context.Context) error { - _, err := utils.PatchFelixConfiguration(ctx, r.client, func(fc *crdv1.FelixConfiguration) bool { - patchRequired := false - if fc.Spec.FlowLogsFileEnabled == nil || !(*fc.Spec.FlowLogsFileEnabled) { - fc.Spec.FlowLogsFileEnabled = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsFileIncludeService == nil || !(*fc.Spec.FlowLogsFileIncludeService) { - fc.Spec.FlowLogsFileIncludeService = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsFileIncludePolicies == nil || !(*fc.Spec.FlowLogsFileIncludePolicies) { - fc.Spec.FlowLogsFileIncludePolicies = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsEnableHostEndpoint == nil || !(*fc.Spec.FlowLogsEnableHostEndpoint) { - fc.Spec.FlowLogsEnableHostEndpoint = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsEnableNetworkSets == nil || !(*fc.Spec.FlowLogsEnableNetworkSets) { - fc.Spec.FlowLogsEnableNetworkSets = ptr.BoolToPtr(true) - patchRequired = true - } - - if fc.Spec.FlowLogsFileIncludeLabels == nil || !(*fc.Spec.FlowLogsFileIncludeLabels) { - fc.Spec.FlowLogsFileIncludeLabels = ptr.BoolToPtr(true) - patchRequired = true - } - return patchRequired // proceed with this patch - }) - return err -} - func hasWindowsNodes(c client.Client) (bool, error) { nodes := corev1.NodeList{} err := c.List(context.Background(), &nodes, client.MatchingLabels{"kubernetes.io/os": "windows"}) diff --git a/pkg/controller/logcollector/logcollector_controller_test.go b/pkg/controller/logcollector/logcollector_controller_test.go index 1b5f5f03a8..373e175270 100644 --- a/pkg/controller/logcollector/logcollector_controller_test.go +++ b/pkg/controller/logcollector/logcollector_controller_test.go @@ -932,21 +932,6 @@ var _ = Describe("LogCollector controller tests (Calico)", func() { }) }) - Context("should enable flow logs felix configs", func() { - It("should set the proper flow logs felix configs", func() { - _, err := r.Reconcile(ctx, reconcile.Request{}) - Expect(err).ShouldNot(HaveOccurred()) - fc := &crdv1.FelixConfiguration{} - Expect(c.Get(ctx, types.NamespacedName{Name: "default", Namespace: ""}, fc)).NotTo(HaveOccurred()) - Expect(*fc.Spec.FlowLogsFileEnabled).Should(BeTrue()) - Expect(*fc.Spec.FlowLogsFileIncludePolicies).Should(BeTrue()) - Expect(*fc.Spec.FlowLogsFileIncludeService).Should(BeTrue()) - Expect(*fc.Spec.FlowLogsEnableHostEndpoint).Should(BeTrue()) - Expect(*fc.Spec.FlowLogsEnableNetworkSets).Should(BeTrue()) - Expect(*fc.Spec.FlowLogsFileIncludeLabels).Should(BeTrue()) - }) - }) - Context("should throw error when additional log collectors are configured", func() { BeforeEach(func() { Expect(c.Delete(ctx, &operatorv1.LogCollector{ diff --git a/pkg/controller/monitor/monitor_controller.go b/pkg/controller/monitor/monitor_controller.go index a1ece0f07f..32632b3782 100644 --- a/pkg/controller/monitor/monitor_controller.go +++ b/pkg/controller/monitor/monitor_controller.go @@ -47,6 +47,7 @@ import ( "github.com/tigera/operator/pkg/dns" "github.com/tigera/operator/pkg/render" rcertificatemanagement "github.com/tigera/operator/pkg/render/certificatemanagement" + rauth "github.com/tigera/operator/pkg/render/common/authentication" 
"github.com/tigera/operator/pkg/render/common/networkpolicy" rsecret "github.com/tigera/operator/pkg/render/common/secret" "github.com/tigera/operator/pkg/render/kubecontrollers" @@ -60,12 +61,11 @@ const ResourceName = "monitor" var log = logf.Log.WithName("controller_monitor") func Add(mgr manager.Manager, opts options.AddOptions) error { - if !opts.EnterpriseCRDExists { - return nil - } - prometheusReady := &utils.ReadyFlag{} - tierWatchReady := &utils.ReadyFlag{} + var tierWatchReady *utils.ReadyFlag + if opts.EnterpriseCRDExists { + tierWatchReady = &utils.ReadyFlag{} + } // Create the reconciler reconciler := newReconciler(mgr, opts, prometheusReady, tierWatchReady) @@ -82,64 +82,44 @@ func Add(mgr manager.Manager, opts options.AddOptions) error { return err } - policyNames := []types.NamespacedName{ - {Name: monitor.PrometheusPolicyName, Namespace: common.TigeraPrometheusNamespace}, - {Name: monitor.PrometheusAPIPolicyName, Namespace: common.TigeraPrometheusNamespace}, - {Name: monitor.PrometheusOperatorPolicyName, Namespace: common.TigeraPrometheusNamespace}, - {Name: monitor.AlertManagerPolicyName, Namespace: common.TigeraPrometheusNamespace}, - {Name: monitor.MeshAlertManagerPolicyName, Namespace: common.TigeraPrometheusNamespace}, - {Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: common.TigeraPrometheusNamespace}, - } - - // Watch for changes to Tier, as its status is used as input to determine whether network policy should be reconciled by this controller. - go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) - - go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, policyNames) + if opts.EnterpriseCRDExists { + policyNames := []types.NamespacedName{ + {Name: monitor.PrometheusPolicyName, Namespace: common.TigeraPrometheusNamespace}, + {Name: monitor.PrometheusAPIPolicyName, Namespace: common.TigeraPrometheusNamespace}, + {Name: monitor.PrometheusOperatorPolicyName, Namespace: common.TigeraPrometheusNamespace}, + {Name: monitor.AlertManagerPolicyName, Namespace: common.TigeraPrometheusNamespace}, + {Name: monitor.MeshAlertManagerPolicyName, Namespace: common.TigeraPrometheusNamespace}, + {Name: networkpolicy.TigeraComponentDefaultDenyPolicyName, Namespace: common.TigeraPrometheusNamespace}, + } - go waitToAddPrometheusWatch(controller, k8sClient, log, prometheusReady) + // Watch for changes to Tier, as its status is used as input to determine whether network policy should be reconciled by this controller. 
+ go utils.WaitToAddTierWatch(networkpolicy.TigeraComponentTierName, controller, k8sClient, log, tierWatchReady) - return add(mgr, controller) -} + go utils.WaitToAddNetworkPolicyWatches(controller, k8sClient, log, policyNames) -func newReconciler(mgr manager.Manager, opts options.AddOptions, prometheusReady *utils.ReadyFlag, tierWatchReady *utils.ReadyFlag) reconcile.Reconciler { - r := &ReconcileMonitor{ - client: mgr.GetClient(), - scheme: mgr.GetScheme(), - provider: opts.DetectedProvider, - status: status.New(mgr.GetClient(), "monitor", opts.KubernetesVersion), - prometheusReady: prometheusReady, - tierWatchReady: tierWatchReady, - clusterDomain: opts.ClusterDomain, - usePSP: opts.UsePSP, + err := controller.Watch(&source.Kind{Type: &operatorv1.Authentication{}}, &handler.EnqueueRequestForObject{}) + if err != nil { + return fmt.Errorf("monitor-controller failed to watch resource: %w", err) + } } - r.status.AddStatefulSets([]types.NamespacedName{ - {Namespace: common.TigeraPrometheusNamespace, Name: fmt.Sprintf("alertmanager-%s", monitor.CalicoNodeAlertmanager)}, - {Namespace: common.TigeraPrometheusNamespace, Name: fmt.Sprintf("prometheus-%s", monitor.CalicoNodePrometheus)}, - }) - - r.status.Run(opts.ShutdownContext) - return r -} - -func add(mgr manager.Manager, c controller.Controller) error { - var err error + go waitToAddPrometheusWatch(controller, k8sClient, log, prometheusReady) // watch for primary resource changes - if err = c.Watch(&source.Kind{Type: &operatorv1.Monitor{}}, &handler.EnqueueRequestForObject{}); err != nil { + if err = controller.Watch(&source.Kind{Type: &operatorv1.Monitor{}}, &handler.EnqueueRequestForObject{}); err != nil { return fmt.Errorf("monitor-controller failed to watch primary resource: %w", err) } - if err = utils.AddNetworkWatch(c); err != nil { + if err = utils.AddNetworkWatch(controller); err != nil { return fmt.Errorf("monitor-controller failed to watch Installation resource: %w", err) } - if err = imageset.AddImageSetWatch(c); err != nil { + if err = imageset.AddImageSetWatch(controller); err != nil { return fmt.Errorf("monitor-controller failed to watch ImageSet: %w", err) } // ManagementClusterConnection (in addition to Installation/Network) is used as input to determine whether network policy should be reconciled. - err = c.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, &handler.EnqueueRequestForObject{}) + err = controller.Watch(&source.Kind{Type: &operatorv1.ManagementClusterConnection{}}, &handler.EnqueueRequestForObject{}) if err != nil { return fmt.Errorf("monitor-controller failed to watch ManagementClusterConnection resource: %w", err) } @@ -152,24 +132,40 @@ func add(mgr manager.Manager, c controller.Controller) error { render.NodePrometheusTLSServerSecret, kubecontrollers.KubeControllerPrometheusTLSSecret, } { - if err = utils.AddSecretsWatch(c, secret, common.OperatorNamespace()); err != nil { + if err = utils.AddSecretsWatch(controller, secret, common.OperatorNamespace()); err != nil { return fmt.Errorf("monitor-controller failed to watch secret: %w", err) } } - err = c.Watch(&source.Kind{Type: &operatorv1.Authentication{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return fmt.Errorf("monitor-controller failed to watch resource: %w", err) - } - // Watch for changes to TigeraStatus. 
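The net effect of the refactor is that common watches are always registered, while the Tier, NetworkPolicy, and Authentication watches are gated on EnterpriseCRDExists. A condensed sketch of that shape; the function name is illustrative:

    package monitor

    import (
    	operatorv1 "github.com/tigera/operator/api/v1"
    	"sigs.k8s.io/controller-runtime/pkg/controller"
    	"sigs.k8s.io/controller-runtime/pkg/handler"
    	"sigs.k8s.io/controller-runtime/pkg/source"
    )

    // addWatches registers reconcile triggers; enterprise-only inputs are
    // skipped on open-source Calico, where those CRDs do not exist.
    func addWatches(c controller.Controller, enterpriseCRDsExist bool) error {
    	// The primary resource always drives reconciliation.
    	if err := c.Watch(&source.Kind{Type: &operatorv1.Monitor{}}, &handler.EnqueueRequestForObject{}); err != nil {
    		return err
    	}
    	if enterpriseCRDsExist {
    		// Authentication is only meaningful alongside the enterprise CRDs.
    		if err := c.Watch(&source.Kind{Type: &operatorv1.Authentication{}}, &handler.EnqueueRequestForObject{}); err != nil {
    			return err
    		}
    	}
    	return nil
    }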
- if err = utils.AddTigeraStatusWatch(c, ResourceName); err != nil { + if err = utils.AddTigeraStatusWatch(controller, ResourceName); err != nil { return fmt.Errorf("monitor-controller failed to watch monitor Tigerastatus: %w", err) } - return nil } +func newReconciler(mgr manager.Manager, opts options.AddOptions, prometheusReady *utils.ReadyFlag, tierWatchReady *utils.ReadyFlag) reconcile.Reconciler { + r := &ReconcileMonitor{ + client: mgr.GetClient(), + scheme: mgr.GetScheme(), + provider: opts.DetectedProvider, + status: status.New(mgr.GetClient(), "monitor", opts.KubernetesVersion), + prometheusReady: prometheusReady, + tierWatchReady: tierWatchReady, + clusterDomain: opts.ClusterDomain, + usePSP: opts.UsePSP, + } + + namespacedName := []types.NamespacedName{{Namespace: common.TigeraPrometheusNamespace, Name: fmt.Sprintf("alertmanager-%s", monitor.CalicoNodeAlertmanager)}} + if opts.EnterpriseCRDExists { + namespacedName = append(namespacedName, types.NamespacedName{Namespace: common.TigeraPrometheusNamespace, Name: fmt.Sprintf("prometheus-%s", monitor.CalicoNodePrometheus)}) + } + r.status.AddStatefulSets(namespacedName) + + r.status.Run(opts.ShutdownContext) + return r +} + // blank assignment to verify that ReconcileMonitor implements reconcile.Reconciler var _ reconcile.Reconciler = &ReconcileMonitor{} @@ -275,13 +271,16 @@ func (r *ReconcileMonitor) Reconcile(ctx context.Context, request reconcile.Requ } trustedBundle := certificateManager.CreateTrustedBundle() - for _, certificateName := range []string{ - esmetrics.ElasticsearchMetricsServerTLSSecret, + certificateNames := []string{ render.FluentdPrometheusTLSSecretName, render.NodePrometheusTLSServerSecret, render.ProjectCalicoAPIServerTLSSecretName(install.Variant), kubecontrollers.KubeControllerPrometheusTLSSecret, - } { + } + if variant == operatorv1.TigeraSecureEnterprise { + certificateNames = append(certificateNames, esmetrics.ElasticsearchMetricsServerTLSSecret) + } + for _, certificateName := range certificateNames { certificate, err := certificateManager.GetCertificate(r.client, certificateName, common.OperatorNamespace()) if err == nil { trustedBundle.AddCertificates(certificate) @@ -290,52 +289,55 @@ func (r *ReconcileMonitor) Reconcile(ctx context.Context, request reconcile.Requ return reconcile.Result{}, err } } - certificateManager.AddToStatusManager(r.status, common.TigeraPrometheusNamespace) - // Fetch the Authentication spec. If present, we use to configure user authentication. 
- authenticationCR, err := utils.GetAuthentication(ctx, r.client) - if err != nil && !errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying Authentication", err, reqLogger) - return reconcile.Result{}, err - } - if authenticationCR != nil && authenticationCR.Status.State != operatorv1.TigeraStatusReady { - r.status.SetDegraded(operatorv1.ResourceNotReady, fmt.Sprintf("Authentication is not ready - authenticationCR status: %s", authenticationCR.Status.State), err, reqLogger) - return reconcile.Result{}, nil - } - - keyValidatorConfig, err := utils.GetKeyValidatorConfig(ctx, r.client, authenticationCR, r.clusterDomain) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to process the authentication CR.", err, reqLogger) - return reconcile.Result{}, err - } + var alertmanagerConfigSecret *corev1.Secret + var keyValidatorConfig rauth.KeyValidatorConfig + createInOperatorNamespace := false + includeV3NetworkPolicy := false + if variant == operatorv1.TigeraSecureEnterprise { + certificateManager.AddToStatusManager(r.status, common.TigeraPrometheusNamespace) - // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache. - if !r.tierWatchReady.IsReady() { - r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } + // Fetch the Authentication spec. If present, we use it to configure user authentication. + authenticationCR, err := utils.GetAuthentication(ctx, r.client) + if err != nil && !errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying Authentication", err, reqLogger) + return reconcile.Result{}, err + } + if authenticationCR != nil && authenticationCR.Status.State != operatorv1.TigeraStatusReady { + r.status.SetDegraded(operatorv1.ResourceNotReady, fmt.Sprintf("Authentication is not ready - authenticationCR status: %s", authenticationCR.Status.State), err, reqLogger) + return reconcile.Result{}, nil + } - // Ensure the allow-tigera tier exists, before rendering any network policies within it. - includeV3NetworkPolicy := false - if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { - // The creation of the Tier depends on this controller to reconcile it's non-NetworkPolicy resources so that the - // License becomes available (in managed clusters). Therefore, if we fail to query the Tier, we exclude NetworkPolicy - // from reconciliation and tolerate errors arising from the Tier not being created. - if !errors.IsNotFound(err) { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + keyValidatorConfig, err = utils.GetKeyValidatorConfig(ctx, r.client, authenticationCR, r.clusterDomain) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to process the authentication CR.", err, reqLogger) return reconcile.Result{}, err } - } else { - includeV3NetworkPolicy = true - } - // Create a component handler to manage the rendered component. - hdler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + // Validate that the tier watch is ready before querying the tier to ensure we utilize the cache.
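In the enterprise branch that follows, v3 NetworkPolicy is only rendered once the allow-tigera Tier is queryable, and NotFound is tolerated because the Tier is created after this controller's other resources. The gate reduces to a sketch like:

    package monitor

    import (
    	"context"

    	v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
    	kerrors "k8s.io/apimachinery/pkg/api/errors"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // tierExists reports whether the named Tier can be read; NotFound means
    // "not created yet" rather than a degraded condition.
    func tierExists(ctx context.Context, cli client.Client, name string) (bool, error) {
    	if err := cli.Get(ctx, client.ObjectKey{Name: name}, &v3.Tier{}); err != nil {
    		if kerrors.IsNotFound(err) {
    			return false, nil
    		}
    		return false, err
    	}
    	return true, nil
    }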
+ if !r.tierWatchReady.IsReady() { + r.status.SetDegraded(operatorv1.ResourceNotReady, "Waiting for Tier watch to be established", nil, reqLogger) + return reconcile.Result{RequeueAfter: 10 * time.Second}, nil + } - alertmanagerConfigSecret, createInOperatorNamespace, err := r.readAlertmanagerConfigSecret(ctx) - if err != nil { - r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving Alertmanager configuration secret", err, reqLogger) - return reconcile.Result{}, err + // Ensure the allow-tigera tier exists, before rendering any network policies within it. + if err := r.client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil { + // The creation of the Tier depends on this controller to reconcile its non-NetworkPolicy resources so that the + // License becomes available (in managed clusters). Therefore, if we fail to query the Tier, we exclude NetworkPolicy + // from reconciliation and tolerate errors arising from the Tier not being created. + if !errors.IsNotFound(err) { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger) + return reconcile.Result{}, err + } + } else { + includeV3NetworkPolicy = true + } + + alertmanagerConfigSecret, createInOperatorNamespace, err = r.readAlertmanagerConfigSecret(ctx) + if err != nil { + r.status.SetDegraded(operatorv1.ResourceReadError, "Error retrieving Alertmanager configuration secret", err, reqLogger) + return reconcile.Result{}, err + } } kubeControllersMetricsPort, err := utils.GetKubeControllerMetricsPort(ctx, r.client) @@ -344,6 +346,9 @@ return reconcile.Result{}, err } + // Create a component handler to manage the rendered component. + hdler := utils.NewComponentHandler(log, r.client, r.scheme, instance) + monitorCfg := &monitor.Config{ Installation: install, PullSecrets: pullSecrets, diff --git a/pkg/controller/monitor/monitor_controller_test.go b/pkg/controller/monitor/monitor_controller_test.go index 5ed5851e6d..b1f9b88092 100644 --- a/pkg/controller/monitor/monitor_controller_test.go +++ b/pkg/controller/monitor/monitor_controller_test.go @@ -45,7 +45,7 @@ import ( "github.com/tigera/operator/pkg/render/monitor" ) -var _ = Describe("Monitor controller tests", func() { +var _ = Describe("Monitor controller tests (Calico Enterprise)", func() { var cli client.Client var ctx context.Context var mockStatus *status.MockStatus @@ -506,3 +506,113 @@ }) }) }) + +var _ = Describe("Monitor controller tests (Calico)", func() { + var cli client.Client + var ctx context.Context + var mockStatus *status.MockStatus + var r ReconcileMonitor + var scheme *runtime.Scheme + var installation *operatorv1.Installation + + BeforeEach(func() { + // The scheme contains all objects that should be known to the fake client when the test runs. + scheme = runtime.NewScheme() + Expect(apis.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(appsv1.SchemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + Expect(rbacv1.SchemeBuilder.AddToScheme(scheme)).NotTo(HaveOccurred()) + + // Create a client that will have a crud interface of k8s objects. + ctx = context.Background() + cli = fake.NewClientBuilder().WithScheme(scheme).Build() + + // Create a mock status we can use throughout the test.
+ mockStatus = &status.MockStatus{} + mockStatus.On("AddCronJobs", mock.Anything) + mockStatus.On("AddDaemonsets", mock.Anything) + mockStatus.On("AddDeployments", mock.Anything).Return() + mockStatus.On("AddStatefulSets", mock.Anything) + mockStatus.On("ClearDegraded") + mockStatus.On("IsAvailable").Return(true) + mockStatus.On("OnCRFound").Return() + mockStatus.On("ReadyToMonitor") + mockStatus.On("RemoveDeployments", mock.Anything) + mockStatus.On("RemoveCertificateSigningRequests", common.TigeraPrometheusNamespace) + mockStatus.On("SetMetaData", mock.Anything).Return() + + // Create an object we can use throughout the test to do the monitor reconcile loops. + r = ReconcileMonitor{ + client: cli, + scheme: scheme, + provider: operatorv1.ProviderNone, + status: mockStatus, + prometheusReady: &utils.ReadyFlag{}, + tierWatchReady: &utils.ReadyFlag{}, + } + + // We start off with a 'standard' installation, with nothing special + installation = &operatorv1.Installation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Generation: 2, + }, + Status: operatorv1.InstallationStatus{ + Variant: operatorv1.TigeraSecureEnterprise, + Computed: &operatorv1.InstallationSpec{}, + }, + Spec: operatorv1.InstallationSpec{ + Variant: operatorv1.Calico, + Registry: "some.registry.org/", + }, + } + Expect(cli.Create(ctx, installation)).To(BeNil()) + + // Apply the Monitor CR to the fake cluster. + Expect(cli.Create(ctx, &operatorv1.Monitor{ + TypeMeta: metav1.TypeMeta{Kind: "Monitor", APIVersion: "operator.tigera.io/v1"}, + ObjectMeta: metav1.ObjectMeta{Name: "tigera-secure"}, + })).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, render.CreateCertificateConfigMap("test", render.TyphaCAConfigMapName, common.OperatorNamespace()))).NotTo(HaveOccurred()) + + // Create a certificate manager and provision the CA to unblock the controller. Generally this would be done by + // the cluster CA controller and is a prerequisite for the monitor controller to function. + cm, err := certificatemanager.Create(cli, &installation.Spec, "cluster.local", common.OperatorNamespace(), certificatemanager.AllowCACreation()) + Expect(err).NotTo(HaveOccurred()) + Expect(cli.Create(ctx, cm.KeyPair().Secret(common.OperatorNamespace()))).NotTo(HaveOccurred()) + + // Mark that watches were successful. + r.prometheusReady.MarkAsReady() + r.tierWatchReady.MarkAsReady() + }) + Context("controller reconciliation", func() { + var ( + am = &monitoringv1.Alertmanager{} + p = &monitoringv1.Prometheus{} + pr = &monitoringv1.PrometheusRule{} + sm = &monitoringv1.ServiceMonitor{} + ) + + BeforeEach(func() { + // Prometheus related objects should not exist. 
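The To(HaveOccurred()) assertions here only require that Get fails for some reason. A slightly stricter variant, a sketch with an illustrative helper name, pins the failure to NotFound:

    package monitor_test

    import (
    	"context"

    	. "github.com/onsi/gomega"
    	kerrors "k8s.io/apimachinery/pkg/api/errors"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // expectAbsent passes only when the object is genuinely missing, so a
    // transient read error cannot masquerade as "not rendered".
    func expectAbsent(ctx context.Context, cli client.Client, key client.ObjectKey, obj client.Object) {
    	err := cli.Get(ctx, key, obj)
    	ExpectWithOffset(1, kerrors.IsNotFound(err)).To(BeTrue(), "expected %v to be absent", key)
    }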
+ Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodeAlertmanager, Namespace: common.TigeraPrometheusNamespace}, am)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodePrometheus, Namespace: common.TigeraPrometheusNamespace}, p)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.TigeraPrometheusDPRate, Namespace: common.TigeraPrometheusNamespace}, pr)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodeMonitor, Namespace: common.TigeraPrometheusNamespace}, sm)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.ElasticsearchMetrics, Namespace: common.TigeraPrometheusNamespace}, sm)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.FluentdMetrics, Namespace: common.TigeraPrometheusNamespace}, sm)).To(HaveOccurred()) + }) + + It("should create Prometheus related resources", func() { + _, err := r.Reconcile(ctx, reconcile.Request{}) + Expect(err).NotTo(HaveOccurred()) + + // Prometheus related objects should be rendered after reconciliation. + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodeAlertmanager, Namespace: common.TigeraPrometheusNamespace}, am)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodePrometheus, Namespace: common.TigeraPrometheusNamespace}, p)).NotTo(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.TigeraPrometheusDPRate, Namespace: common.TigeraPrometheusNamespace}, pr)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.CalicoNodeMonitor, Namespace: common.TigeraPrometheusNamespace}, sm)).NotTo(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.ElasticsearchMetrics, Namespace: common.TigeraPrometheusNamespace}, sm)).To(HaveOccurred()) + Expect(cli.Get(ctx, client.ObjectKey{Name: monitor.FluentdMetrics, Namespace: common.TigeraPrometheusNamespace}, sm)).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/pkg/controller/utils/discovery.go b/pkg/controller/utils/discovery.go index d2d9a0e6be..933271ca09 100644 --- a/pkg/controller/utils/discovery.go +++ b/pkg/controller/utils/discovery.go @@ -55,8 +55,6 @@ func RequiresTigeraSecure(cfg *rest.Config) (bool, error) { fallthrough case "ApplicationLayer": fallthrough - case "Monitor": - fallthrough case "ManagementCluster": fallthrough case "EgressGateway": diff --git a/pkg/controller/utils/utils.go b/pkg/controller/utils/utils.go index b2aa0e8095..2ab0080814 100644 --- a/pkg/controller/utils/utils.go +++ b/pkg/controller/utils/utils.go @@ -269,6 +269,18 @@ func GetLogCollector(ctx context.Context, cli client.Client) (*operatorv1.LogCol return logCollector, nil } +func GetMonitor(ctx context.Context, cli client.Client) (*operatorv1.Monitor, error) { + monitor := &operatorv1.Monitor{} + err := cli.Get(ctx, DefaultTSEEInstanceKey, monitor) + if err != nil { + if kerrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + return monitor, err +} + // FetchLicenseKey returns the license if it has been installed. It's useful // to prevent rollout of TSEE components that might require it. 
// It will return an error if the license is not installed/cannot be read diff --git a/pkg/render/monitor/monitor.go b/pkg/render/monitor/monitor.go index 5fcceb48ee..07ee73c20d 100644 --- a/pkg/render/monitor/monitor.go +++ b/pkg/render/monitor/monitor.go @@ -190,7 +190,6 @@ func (mc *monitorComponent) Objects() ([]client.Object, []client.Object) { ) toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(common.TigeraPrometheusNamespace, mc.cfg.PullSecrets...)...)...) - toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(common.TigeraPrometheusNamespace, mc.cfg.AlertmanagerConfigSecret)...)...) toCreate = append(toCreate, mc.prometheusOperatorServiceAccount(), @@ -200,18 +199,23 @@ func (mc *monitorComponent) Objects() ([]client.Object, []client.Object) { mc.prometheusClusterRole(), mc.prometheusClusterRoleBinding(), mc.prometheus(), - mc.alertmanagerService(), - mc.alertmanager(), mc.prometheusServiceService(), mc.prometheusServiceClusterRole(), mc.prometheusServiceClusterRoleBinding(), - mc.prometheusRule(), mc.serviceMonitorCalicoNode(), - mc.serviceMonitorElasticsearch(), mc.serviceMonitorFluentd(), mc.serviceMonitorQueryServer(), mc.serviceMonitorCalicoKubeControllers(), ) + if mc.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + toCreate = append(toCreate, secret.ToRuntimeObjects(secret.CopyToNamespace(common.TigeraPrometheusNamespace, mc.cfg.AlertmanagerConfigSecret)...)...) + toCreate = append(toCreate, + mc.alertmanagerService(), + mc.alertmanager(), + mc.prometheusRule(), + mc.serviceMonitorElasticsearch(), + ) + } if mc.cfg.KeyValidatorConfig != nil { toCreate = append(toCreate, secret.ToRuntimeObjects(mc.cfg.KeyValidatorConfig.RequiredSecrets(common.TigeraPrometheusNamespace)...)...) @@ -229,6 +233,14 @@ func (mc *monitorComponent) Objects() ([]client.Object, []client.Object) { // Remove the tigera-prometheus-api deployment that was part of release-v1.23, but has been removed since. 
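+		// Objects returned in the second (delete) slice are removed by the handler if they
+		// exist. Besides this stale deployment, the Calico case below also returns the
+		// Enterprise-only monitoring objects so they are cleaned up when a cluster moves
+		// from Enterprise back to open-source Calico.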
&appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-api", Namespace: common.TigeraPrometheusNamespace}}, ) + if mc.cfg.Installation.Variant == operatorv1.Calico { + toDelete = append(toDelete, + mc.alertmanagerService(), + mc.alertmanager(), + mc.prometheusRule(), + mc.serviceMonitorElasticsearch(), + ) + } return toCreate, toDelete } @@ -735,6 +747,28 @@ func (mc *monitorComponent) prometheusRule() *monitoringv1.PrometheusRule { } func (mc *monitorComponent) serviceMonitorCalicoNode() *monitoringv1.ServiceMonitor { + endPoints := []monitoringv1.Endpoint{ + { + HonorLabels: true, + Interval: "5s", + Port: "calico-metrics-port", + ScrapeTimeout: "5s", + Scheme: "https", + TLSConfig: mc.tlsConfig(render.CalicoNodeMetricsService), + }, + } + if mc.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { + endPoints = append(endPoints, monitoringv1.Endpoint{ + HonorLabels: true, + Interval: "5s", + Port: "calico-bgp-metrics-port", + ScrapeTimeout: "5s", + Scheme: "https", + TLSConfig: mc.tlsConfig(render.CalicoNodeMetricsService), + }, + ) + } + return &monitoringv1.ServiceMonitor{ TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: MonitoringAPIVersion}, ObjectMeta: metav1.ObjectMeta{ @@ -745,24 +779,7 @@ func (mc *monitorComponent) serviceMonitorCalicoNode() *monitoringv1.ServiceMoni Spec: monitoringv1.ServiceMonitorSpec{ Selector: metav1.LabelSelector{MatchLabels: map[string]string{"k8s-app": "calico-node"}}, NamespaceSelector: monitoringv1.NamespaceSelector{MatchNames: []string{"calico-system"}}, - Endpoints: []monitoringv1.Endpoint{ - { - HonorLabels: true, - Interval: "5s", - Port: "calico-metrics-port", - ScrapeTimeout: "5s", - Scheme: "https", - TLSConfig: mc.tlsConfig(render.CalicoNodeMetricsService), - }, - { - HonorLabels: true, - Interval: "5s", - Port: "calico-bgp-metrics-port", - ScrapeTimeout: "5s", - Scheme: "https", - TLSConfig: mc.tlsConfig(render.CalicoNodeMetricsService), - }, - }, + Endpoints: endPoints, }, } } diff --git a/pkg/render/monitor/monitor_test.go b/pkg/render/monitor/monitor_test.go index 967fcb2446..5d430d4229 100644 --- a/pkg/render/monitor/monitor_test.go +++ b/pkg/render/monitor/monitor_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" v3 "github.com/tigera/api/pkg/apis/projectcalico/v3" @@ -45,6 +46,7 @@ import ( "github.com/tigera/operator/pkg/render/monitor" "github.com/tigera/operator/pkg/render/testutils" "github.com/tigera/operator/pkg/tls/certificatemanagement" + policyv1beta1 "k8s.io/api/policy/v1beta1" ) var _ = Describe("monitor rendering tests", func() { @@ -101,52 +103,98 @@ var _ = Describe("monitor rendering tests", func() { } }) - It("Should render Prometheus resources", func() { + It("Should render Prometheus resources for Calico", func() { + cfg.Installation.Variant = operatorv1.Calico component := monitor.Monitor(cfg) Expect(component.ResolveImages(nil)).NotTo(HaveOccurred()) toCreate, toDelete := component.Objects() // should render correct resources - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {"tigera-prometheus", "", "", "v1", "Namespace"}, - {"tigera-prometheus-role", common.TigeraPrometheusNamespace, "rbac.authorization.k8s.io", "v1", "Role"}, - {"tigera-prometheus-role-binding", common.TigeraPrometheusNamespace, 
"rbac.authorization.k8s.io", "v1", "RoleBinding"}, - {"tigera-pull-secret", common.TigeraPrometheusNamespace, "", "", ""}, - {"alertmanager-calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Secret"}, - {"calico-prometheus-operator", "tigera-prometheus", "", "v1", "ServiceAccount"}, - {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"prometheus", common.TigeraPrometheusNamespace, "", "v1", "ServiceAccount"}, - {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"calico-node-prometheus", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusesKind}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, - {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"tigera-prometheus-dp-rate", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusRuleKind}, - {"calico-node-monitor", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"elasticsearch-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"fluentd-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"tigera-api", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"calico-kube-controllers-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"tigera-prometheus", "", "policy", "v1beta1", "PodSecurityPolicy"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role-binding", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + 
&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.Prometheus{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.PrometheusesKind, APIVersion: "monitoring.coreos.com/v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "prometheus-http-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-monitor", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "tigera-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-kube-controllers-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceMonitorsKind", APIVersion: "monitoring.coreos.com/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, } Expect(toCreate).To(HaveLen(len(expectedResources))) - for i, expectedRes := range expectedResources { - obj := toCreate[i] - rtest.CompareResource(obj, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind) + rtest.ExpectResources(toCreate, expectedResources) + Expect(toDelete).To(HaveLen(6)) + + // Check the namespace. 
+ namespace := rtest.GetResource(toCreate, "tigera-prometheus", "", "", "v1", "Namespace").(*corev1.Namespace) + Expect(namespace.Labels["pod-security.kubernetes.io/enforce"]).To(Equal("baseline")) + Expect(namespace.Labels["pod-security.kubernetes.io/enforce-version"]).To(Equal("latest")) + // ServiceMonitor + servicemonitorObj, ok := rtest.GetResource(toCreate, monitor.CalicoNodeMonitor, common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind).(*monitoringv1.ServiceMonitor) + Expect(ok).To(BeTrue()) + Expect(servicemonitorObj.ObjectMeta.Labels).To(HaveLen(1)) + Expect(servicemonitorObj.ObjectMeta.Labels["team"]).To(Equal("network-operators")) + Expect(servicemonitorObj.Spec.Selector.MatchLabels).To(HaveLen(1)) + Expect(servicemonitorObj.Spec.Selector.MatchLabels["k8s-app"]).To(Equal("calico-node")) + Expect(servicemonitorObj.Spec.NamespaceSelector.MatchNames).To(HaveLen(1)) + Expect(servicemonitorObj.Spec.NamespaceSelector.MatchNames[0]).To(Equal("calico-system")) + Expect(servicemonitorObj.Spec.Endpoints).To(HaveLen(1)) + Expect(servicemonitorObj.Spec.Endpoints[0].HonorLabels).To(BeTrue()) + Expect(servicemonitorObj.Spec.Endpoints[0].Interval).To(BeEquivalentTo("5s")) + Expect(servicemonitorObj.Spec.Endpoints[0].Port).To(Equal("calico-metrics-port")) + Expect(servicemonitorObj.Spec.Endpoints[0].ScrapeTimeout).To(BeEquivalentTo("5s")) + Expect(servicemonitorObj.Spec.Endpoints[0].Scheme).To(Equal("https")) + }) + + It("Should render Prometheus resources for Calico Enterprise", func() { + cfg.Installation.Variant = operatorv1.TigeraSecureEnterprise + component := monitor.Monitor(cfg) + Expect(component.ResolveImages(nil)).NotTo(HaveOccurred()) + toCreate, toDelete := component.Objects() + + // should render correct resources + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role-binding", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "alertmanager-calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: 
"ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.Prometheus{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.PrometheusesKind, APIVersion: "monitoring.coreos.com/v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &monitoringv1.Alertmanager{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.AlertmanagersKind, APIVersion: "monitoring.coreos.com/v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "prometheus-http-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.PrometheusRule{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-dp-rate", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.PrometheusRuleKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-monitor", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "tigera-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}}, + &monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-kube-controllers-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceMonitorsKind", APIVersion: "monitoring.coreos.com/v1"}}, + &policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}}, } + Expect(toCreate).To(HaveLen(len(expectedResources))) + + rtest.ExpectResources(toCreate, expectedResources) Expect(toDelete).To(HaveLen(2)) // Check the namespace. 
@@ -156,6 +204,7 @@ var _ = Describe("monitor rendering tests", func() { }) It("Should render Prometheus resource Specs correctly", func() { + cfg.Installation.Variant = operatorv1.TigeraSecureEnterprise component := monitor.Monitor(cfg) Expect(component.ResolveImages(nil)).NotTo(HaveOccurred()) toCreate, _ := component.Objects() @@ -539,6 +588,7 @@ var _ = Describe("monitor rendering tests", func() { It("should render properly when PSP is not supported by the cluster", func() { cfg.UsePSP = false + cfg.Installation.Variant = operatorv1.TigeraSecureEnterprise component := monitor.Monitor(cfg) Expect(component.ResolveImages(nil)).To(BeNil()) resources, _ := component.Objects() @@ -564,52 +614,43 @@ var _ = Describe("monitor rendering tests", func() { dns.DefaultClusterDomain) cfg.KeyValidatorConfig = dexCfg cfg.ServerTLSSecret = prometheusKeyPair + cfg.Installation.Variant = operatorv1.TigeraSecureEnterprise component := monitor.Monitor(cfg) Expect(component.ResolveImages(nil)).NotTo(HaveOccurred()) toCreate, toDelete := component.Objects() // should render correct resources - expectedResources := []struct { - name string - ns string - group string - version string - kind string - }{ - {"tigera-prometheus", "", "", "v1", "Namespace"}, - {"tigera-prometheus-role", common.TigeraPrometheusNamespace, "rbac.authorization.k8s.io", "v1", "Role"}, - {"tigera-prometheus-role-binding", common.TigeraPrometheusNamespace, "rbac.authorization.k8s.io", "v1", "RoleBinding"}, - {"tigera-pull-secret", common.TigeraPrometheusNamespace, "", "", ""}, - {"alertmanager-calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Secret"}, - {"calico-prometheus-operator", "tigera-prometheus", "", "v1", "ServiceAccount"}, - {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"calico-prometheus-operator", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"prometheus", common.TigeraPrometheusNamespace, "", "v1", "ServiceAccount"}, - {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"calico-node-prometheus", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusesKind}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"calico-node-alertmanager", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.AlertmanagersKind}, - {"prometheus-http-api", common.TigeraPrometheusNamespace, "", "v1", "Service"}, - {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRole"}, - {"tigera-prometheus", "", "rbac.authorization.k8s.io", "v1", "ClusterRoleBinding"}, - {"tigera-prometheus-dp-rate", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.PrometheusRuleKind}, - {"calico-node-monitor", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"elasticsearch-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"fluentd-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"tigera-api", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"calico-kube-controllers-metrics", common.TigeraPrometheusNamespace, "monitoring.coreos.com", "v1", monitoringv1.ServiceMonitorsKind}, - {"tigera-prometheus", "", "policy", "v1beta1", 
"PodSecurityPolicy"}, + expectedResources := []client.Object{ + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "Namespace", APIVersion: "v1"}}, + &rbacv1.Role{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Role", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.RoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-role-binding", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "RoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "tigera-pull-secret", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "alertmanager-calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "calico-prometheus-operator"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "ServiceAccount", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.Prometheus{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-prometheus", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.PrometheusesKind, APIVersion: "monitoring.coreos.com/v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &monitoringv1.Alertmanager{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-alertmanager", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.AlertmanagersKind, APIVersion: "monitoring.coreos.com/v1"}}, + &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "prometheus-http-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRole", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "ClusterRoleBinding", APIVersion: "rbac.authorization.k8s.io/v1"}}, + &monitoringv1.PrometheusRule{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus-dp-rate", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: 
monitoringv1.PrometheusRuleKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-node-monitor", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "elasticsearch-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "fluentd-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "tigera-api", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&monitoringv1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{Name: "calico-kube-controllers-metrics", Namespace: common.TigeraPrometheusNamespace}, TypeMeta: metav1.TypeMeta{Kind: monitoringv1.ServiceMonitorsKind, APIVersion: "monitoring.coreos.com/v1"}},
+			&policyv1beta1.PodSecurityPolicy{ObjectMeta: metav1.ObjectMeta{Name: "tigera-prometheus"}, TypeMeta: metav1.TypeMeta{Kind: "PodSecurityPolicy", APIVersion: "policy/v1beta1"}},
 		}
 
 		Expect(toCreate).To(HaveLen(len(expectedResources)))
-		for i, expectedRes := range expectedResources {
-			obj := toCreate[i]
-			rtest.CompareResource(obj, expectedRes.name, expectedRes.ns, expectedRes.group, expectedRes.version, expectedRes.kind)
-		}
-
+		rtest.ExpectResources(toCreate, expectedResources)
 		Expect(toDelete).To(HaveLen(2))
 
 		// Prometheus
@@ -743,6 +784,7 @@ var _ = Describe("monitor rendering tests", func() {
 		func(scenario testutils.AllowTigeraScenario) {
 			cfg.Openshift = scenario.Openshift
 			cfg.KubeControllerPort = 9094
+			cfg.Installation.Variant = operatorv1.TigeraSecureEnterprise
 			component := monitor.MonitorPolicy(cfg)
 			resourcesToCreate, _ := component.Objects()
diff --git a/pkg/render/node.go b/pkg/render/node.go
index e9065bdac8..f2d8a81e81 100644
--- a/pkg/render/node.go
+++ b/pkg/render/node.go
@@ -95,6 +95,7 @@ type NodeConfiguration struct {
 	// Optional fields.
 	AmazonCloudIntegration *operatorv1.AmazonCloudIntegration
 	LogCollector           *operatorv1.LogCollector
+	MonitorResource        *operatorv1.Monitor
 	MigrateNamespaces      bool
 	NodeAppArmorProfile    string
 	BirdTemplates          map[string]string
@@ -206,6 +207,7 @@ func (c *nodeComponent) Objects() ([]client.Object, []client.Object) {
-	if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise {
-		// Include Service for exposing node metrics.
+	// Include the Service for exposing node metrics: always for Enterprise, and for
+	// Calico whenever a Monitor resource is present.
+	if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise || c.cfg.MonitorResource != nil {
 		objs = append(objs, c.nodeMetricsService())
 	}
 
 	cniConfig := c.nodeCNIConfigMap()
@@ -510,10 +513,22 @@ func (c *nodeComponent) nodeRole() *rbacv1.ClusterRole {
 				"externalnetworks",
 				"licensekeys",
 				"remoteclusterconfigurations",
+				"stagedglobalnetworkpolicies",
+				"stagedkubernetesnetworkpolicies",
+				"stagednetworkpolicies",
+				"tiers",
 				"packetcaptures",
 			},
 			Verbs: []string{"get", "list", "watch"},
 		},
+		{
+			// Tigera Secure creates some tiers on startup.
+			APIGroups: []string{"crd.projectcalico.org"},
+			Resources: []string{
+				"tiers",
+			},
+			Verbs: []string{"create"},
+		},
 		{
 			// Tigera Secure updates status for packet captures.
APIGroups: []string{"crd.projectcalico.org"}, @@ -1292,7 +1307,7 @@ func (c *nodeComponent) nodeVolumeMounts() []corev1.VolumeMount { nodeVolumeMounts = append(nodeVolumeMounts, corev1.VolumeMount{MountPath: "/var/log/calico", Name: "var-log-calico"}) - if c.cfg.Installation.Variant != operatorv1.TigeraSecureEnterprise && c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { + if c.cfg.Installation.CNI.Type == operatorv1.PluginCalico { cniLogMount := corev1.VolumeMount{MountPath: "/var/log/calico/cni", Name: "cni-log-dir", ReadOnly: false} nodeVolumeMounts = append(nodeVolumeMounts, cniLogMount) } @@ -1598,6 +1613,36 @@ func (c *nodeComponent) nodeEnvVars() []corev1.EnvVar { nodeEnv = append(nodeEnv, corev1.EnvVar{Name: "FELIX_IPV6SUPPORT", Value: "false"}) } + // We are checking for the logCollector and Monitor resource in Calico but not in Calico Enterprise + // because, in Calico logCollector may or may not be present whereas in the Calico Enterprise it is always + // expected. + if c.cfg.Installation.Variant == operatorv1.Calico { + extraNodeEnv := []corev1.EnvVar{} + if c.cfg.LogCollector != nil { + extraNodeEnv = append(extraNodeEnv, []corev1.EnvVar{ + {Name: "FELIX_FLOWLOGSFILEENABLED", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDELABELS", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDEPOLICIES", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDESERVICE", Value: "true"}, + {Name: "FELIX_FLOWLOGSENABLENETWORKSETS", Value: "true"}, + }...) + } + if c.cfg.MonitorResource != nil { + extraNodeEnv = append(extraNodeEnv, []corev1.EnvVar{ + {Name: "FELIX_PROMETHEUSREPORTERENABLED", Value: "true"}, + {Name: "FELIX_PROMETHEUSREPORTERPORT", Value: fmt.Sprintf("%d", c.cfg.NodeReporterMetricsPort)}, + }...) + if c.cfg.PrometheusServerTLS != nil { + extraNodeEnv = append(extraNodeEnv, []corev1.EnvVar{ + {Name: "FELIX_PROMETHEUSREPORTERCERTFILE", Value: c.cfg.PrometheusServerTLS.VolumeMountCertificateFilePath()}, + {Name: "FELIX_PROMETHEUSREPORTERKEYFILE", Value: c.cfg.PrometheusServerTLS.VolumeMountKeyFilePath()}, + {Name: "FELIX_PROMETHEUSREPORTERCAFILE", Value: c.cfg.TLS.TrustedBundle.MountPath()}, + }...) + } + } + nodeEnv = append(nodeEnv, extraNodeEnv...) + } + if c.cfg.Installation.Variant == operatorv1.TigeraSecureEnterprise { // Add in Calico Enterprise specific configuration. extraNodeEnv := []corev1.EnvVar{ diff --git a/pkg/render/node_test.go b/pkg/render/node_test.go index 7d6a76ada4..e312ad982c 100644 --- a/pkg/render/node_test.go +++ b/pkg/render/node_test.go @@ -748,6 +748,142 @@ var _ = Describe("Node rendering tests", func() { } }) + It("should render flow logs env vars if logCollector is not nil for Calico", func() { + defaultInstance.Variant = operatorv1.Calico + cfg.NodeReporterMetricsPort = 9081 + + component := render.Node(&cfg) + Expect(component.ResolveImages(nil)).To(BeNil()) + resources, _ := component.Objects() + // The DaemonSet should have the correct configuration. + ds := rtest.GetResource(resources, "calico-node", "calico-system", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) + rtest.ExpectEnv(rtest.GetContainer(ds.Spec.Template.Spec.InitContainers, "install-cni").Env, "CNI_NET_DIR", "/etc/cni/net.d") + + expectedNodeEnv := []corev1.EnvVar{ + // Default envvars. 
+ {Name: "DATASTORE_TYPE", Value: "kubernetes"}, + {Name: "WAIT_FOR_DATASTORE", Value: "true"}, + {Name: "CALICO_MANAGE_CNI", Value: "true"}, + {Name: "CALICO_NETWORKING_BACKEND", Value: "bird"}, + {Name: "CLUSTER_TYPE", Value: "k8s,operator,bgp"}, + {Name: "CALICO_DISABLE_FILE_LOGGING", Value: "false"}, + {Name: "FELIX_DEFAULTENDPOINTTOHOSTACTION", Value: "ACCEPT"}, + {Name: "FELIX_HEALTHENABLED", Value: "true"}, + {Name: "FELIX_HEALTHPORT", Value: "9099"}, + { + Name: "NODENAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, + }, + }, + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + {Name: "FELIX_TYPHAK8SNAMESPACE", Value: "calico-system"}, + {Name: "FELIX_TYPHAK8SSERVICENAME", Value: "calico-typha"}, + {Name: "FELIX_TYPHACAFILE", Value: certificatemanagement.TrustedCertBundleMountPath}, + {Name: "FELIX_TYPHACERTFILE", Value: "/node-certs/tls.crt"}, + {Name: "FELIX_TYPHAKEYFILE", Value: "/node-certs/tls.key"}, + {Name: "FIPS_MODE_ENABLED", Value: "false"}, + } + expectedNodeEnv = configureExpectedNodeEnvIPVersions(expectedNodeEnv, defaultInstance, enableIPv4, enableIPv6) + Expect(ds.Spec.Template.Spec.Containers[0].Env).To(ConsistOf(expectedNodeEnv)) + Expect(len(ds.Spec.Template.Spec.Containers[0].Env)).To(Equal(len(expectedNodeEnv))) + + cfg.LogCollector = &operatorv1.LogCollector{} + component = render.Node(&cfg) + resources, _ = component.Objects() + // The DaemonSet should have the correct configuration. + ds = rtest.GetResource(resources, "calico-node", "calico-system", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) + expectedNodeEnv = []corev1.EnvVar{ + // Default envvars. + {Name: "DATASTORE_TYPE", Value: "kubernetes"}, + {Name: "WAIT_FOR_DATASTORE", Value: "true"}, + {Name: "CALICO_MANAGE_CNI", Value: "true"}, + {Name: "CALICO_NETWORKING_BACKEND", Value: "bird"}, + {Name: "CLUSTER_TYPE", Value: "k8s,operator,bgp"}, + {Name: "CALICO_DISABLE_FILE_LOGGING", Value: "false"}, + {Name: "FELIX_DEFAULTENDPOINTTOHOSTACTION", Value: "ACCEPT"}, + {Name: "FELIX_HEALTHENABLED", Value: "true"}, + {Name: "FELIX_HEALTHPORT", Value: "9099"}, + { + Name: "NODENAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, + }, + }, + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + {Name: "FELIX_TYPHAK8SNAMESPACE", Value: "calico-system"}, + {Name: "FELIX_TYPHAK8SSERVICENAME", Value: "calico-typha"}, + {Name: "FELIX_TYPHACAFILE", Value: certificatemanagement.TrustedCertBundleMountPath}, + {Name: "FELIX_TYPHACERTFILE", Value: "/node-certs/tls.crt"}, + {Name: "FELIX_TYPHAKEYFILE", Value: "/node-certs/tls.key"}, + {Name: "FELIX_FLOWLOGSFILEENABLED", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDELABELS", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDEPOLICIES", Value: "true"}, + {Name: "FELIX_FLOWLOGSFILEINCLUDESERVICE", Value: "true"}, + {Name: "FELIX_FLOWLOGSENABLENETWORKSETS", Value: "true"}, + {Name: "FIPS_MODE_ENABLED", Value: "false"}, + } + expectedNodeEnv = configureExpectedNodeEnvIPVersions(expectedNodeEnv, defaultInstance, enableIPv4, enableIPv6) + Expect(ds.Spec.Template.Spec.Containers[0].Env).To(ConsistOf(expectedNodeEnv)) + Expect(len(ds.Spec.Template.Spec.Containers[0].Env)).To(Equal(len(expectedNodeEnv))) + }) + + It("should render prometheus env vars if Monitor is not nil for 
Calico", func() { + defaultInstance.Variant = operatorv1.Calico + cfg.NodeReporterMetricsPort = 9081 + + cfg.MonitorResource = &operatorv1.Monitor{} + component := render.Node(&cfg) + resources, _ := component.Objects() + // The DaemonSet should have the correct configuration. + ds := rtest.GetResource(resources, "calico-node", "calico-system", "apps", "v1", "DaemonSet").(*appsv1.DaemonSet) + expectedNodeEnv := []corev1.EnvVar{ + // Default envvars. + {Name: "DATASTORE_TYPE", Value: "kubernetes"}, + {Name: "WAIT_FOR_DATASTORE", Value: "true"}, + {Name: "CALICO_MANAGE_CNI", Value: "true"}, + {Name: "CALICO_NETWORKING_BACKEND", Value: "bird"}, + {Name: "CLUSTER_TYPE", Value: "k8s,operator,bgp"}, + {Name: "CALICO_DISABLE_FILE_LOGGING", Value: "false"}, + {Name: "FELIX_DEFAULTENDPOINTTOHOSTACTION", Value: "ACCEPT"}, + {Name: "FELIX_HEALTHENABLED", Value: "true"}, + {Name: "FELIX_HEALTHPORT", Value: "9099"}, + { + Name: "NODENAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "spec.nodeName"}, + }, + }, + { + Name: "NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}, + }, + }, + {Name: "FELIX_TYPHAK8SNAMESPACE", Value: "calico-system"}, + {Name: "FELIX_TYPHAK8SSERVICENAME", Value: "calico-typha"}, + {Name: "FELIX_TYPHACAFILE", Value: certificatemanagement.TrustedCertBundleMountPath}, + {Name: "FELIX_TYPHACERTFILE", Value: "/node-certs/tls.crt"}, + {Name: "FELIX_TYPHAKEYFILE", Value: "/node-certs/tls.key"}, + {Name: "FELIX_PROMETHEUSREPORTERENABLED", Value: "true"}, + {Name: "FELIX_PROMETHEUSREPORTERPORT", Value: fmt.Sprintf("%d", cfg.NodeReporterMetricsPort)}, + {Name: "FIPS_MODE_ENABLED", Value: "false"}, + } + expectedNodeEnv = configureExpectedNodeEnvIPVersions(expectedNodeEnv, defaultInstance, enableIPv4, enableIPv6) + Expect(ds.Spec.Template.Spec.Containers[0].Env).To(ConsistOf(expectedNodeEnv)) + Expect(len(ds.Spec.Template.Spec.Containers[0].Env)).To(Equal(len(expectedNodeEnv))) + }) + It("should render all resources for a default configuration using TigeraSecureEnterprise", func() { expectedResources := []struct { name string