diff --git a/.secrets.baseline b/.secrets.baseline index 8c2f2d68..5b04f343 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -75,10 +75,6 @@ { "path": "detect_secrets.filters.allowlist.is_line_allowlisted" }, - { - "path": "detect_secrets.filters.common.is_baseline_file", - "filename": ".secrets.baseline" - }, { "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", "min_level": 2 @@ -169,14 +165,14 @@ "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a242f4a16b957f7ff99eb24e189e94d270d2348b", "is_verified": false, - "line_number": 293 + "line_number": 294 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/cadence_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 302 + "line_number": 303 } ], "apis/clusters/v1beta1/cassandra_types.go": [ @@ -208,7 +204,7 @@ "filename": "apis/clusters/v1beta1/cassandra_webhook.go", "hashed_secret": "e0a46b27231f798fe22dc4d5d82b5feeb5dcf085", "is_verified": false, - "line_number": 235 + "line_number": 232 } ], "apis/clusters/v1beta1/kafka_types.go": [ @@ -351,7 +347,7 @@ "filename": "apis/clusters/v1beta1/redis_webhook.go", "hashed_secret": "bc1c5ae5fd4a238d86261f422e62c489de408c22", "is_verified": false, - "line_number": 322 + "line_number": 323 } ], "apis/clusters/v1beta1/zz_generated.deepcopy.go": [ @@ -360,7 +356,7 @@ "filename": "apis/clusters/v1beta1/zz_generated.deepcopy.go", "hashed_secret": "44e17306b837162269a410204daaa5ecee4ec22c", "is_verified": false, - "line_number": 1316 + "line_number": 676 } ], "apis/kafkamanagement/v1beta1/kafkauser_types.go": [ @@ -499,16 +495,9 @@ { "type": "Secret Keyword", "filename": "controllers/clusters/cadence_controller.go", - "hashed_secret": "bcf196cdeea4d7ed8b04dcbbd40111eb5e9abeac", - "is_verified": false, - "line_number": 644 - }, - { - "type": "Secret Keyword", - "filename": "controllers/clusters/cadence_controller.go", - "hashed_secret": "192d703e91a60432ce06bfe26adfd12f5c7b931f", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 677 + "line_number": 755 } ], "controllers/clusters/datatest/kafka_v1beta1.yaml": [ @@ -529,6 +518,22 @@ "line_number": 51 } ], + "controllers/clusters/helpers.go": [ + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", + "is_verified": false, + "line_number": 216 + }, + { + "type": "Secret Keyword", + "filename": "controllers/clusters/helpers.go", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", + "is_verified": false, + "line_number": 221 + } + ], "controllers/clusters/kafkaconnect_controller_test.go": [ { "type": "Secret Keyword", @@ -1116,5 +1121,5 @@ } ] }, - "generated_at": "2024-02-28T09:07:57Z" + "generated_at": "2024-02-28T16:08:51Z" } diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go index 448d6c85..23c00aca 100644 --- a/apis/clusters/v1beta1/cadence_types.go +++ b/apis/clusters/v1beta1/cadence_types.go @@ -101,10 +101,10 @@ type AWSArchival struct { } type PackagedProvisioning struct { - UseAdvancedVisibility bool `json:"useAdvancedVisibility"` - BundledKafkaSpec *BundledKafkaSpec `json:"bundledKafkaSpec,omitempty"` - BundledOpenSearchSpec *BundledOpenSearchSpec `json:"bundledOpenSearchSpec,omitempty"` - BundledCassandraSpec *BundledCassandraSpec `json:"bundledCassandraSpec"` + UseAdvancedVisibility bool `json:"useAdvancedVisibility"` + + // 
+kubebuilder:validation:Enum=Developer;Production-Starter;Production-Small + SolutionSize string `json:"solutionSize"` } type SharedProvisioning struct { @@ -131,8 +131,9 @@ type AdvancedVisibility struct { type CadenceStatus struct { GenericStatus `json:",inline"` - DataCentres []*CadenceDataCentreStatus `json:"dataCentres,omitempty"` - TargetSecondaryCadence []*CadenceDependencyTarget `json:"targetSecondaryCadence,omitempty"` + PackagedProvisioningClusterRefs []*Reference `json:"packagedProvisioningClusterRefs,omitempty"` + DataCentres []*CadenceDataCentreStatus `json:"dataCentres,omitempty"` + TargetSecondaryCadence []*CadenceDependencyTarget `json:"targetSecondaryCadence,omitempty"` } type CadenceDataCentreStatus struct { @@ -584,3 +585,12 @@ func (cdc *CadenceDataCentreStatus) Equals(o *CadenceDataCentreStatus) bool { nodesEqual(cdc.Nodes, o.Nodes) && cdc.NumberOfNodes == o.NumberOfNodes } + +func (c *CadenceSpec) CalculateNodeSize(cloudProvider, solution, app string) string { + if appSizes, ok := models.SolutionSizesMap[cloudProvider]; ok { + if solutionMap, ok := appSizes[app]; ok { + return solutionMap[solution] + } + } + return "" +} diff --git a/apis/clusters/v1beta1/cadence_webhook.go b/apis/clusters/v1beta1/cadence_webhook.go index cda1a6dc..1846d650 100644 --- a/apis/clusters/v1beta1/cadence_webhook.go +++ b/apis/clusters/v1beta1/cadence_webhook.go @@ -19,7 +19,6 @@ package v1beta1 import ( "context" "fmt" - "net" "regexp" "k8s.io/apimachinery/pkg/runtime" @@ -129,11 +128,6 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje } } - err = c.Spec.validatePackagedProvisioningCreation() - if err != nil { - return err - } - for _, dc := range c.Spec.DataCentres { err = dc.GenericDataCentreSpec.validateCreation() if err != nil { @@ -147,6 +141,14 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje if dc.CloudProvider != models.AWSVPC && dc.PrivateLink != nil { return models.ErrPrivateLinkSupportedOnlyForAWS } + + if len(c.Spec.PackagedProvisioning) != 0 { + err = validateNetwork(dc.Network) + if err != nil { + return err + } + } + } for _, rs := range c.Spec.ResizeSettings { @@ -348,24 +350,7 @@ func (cs *CadenceSpec) validatePackagedProvisioning(old []*PackagedProvisioning) } for i, pp := range cs.PackagedProvisioning { - if pp.UseAdvancedVisibility { - if pp.BundledKafkaSpec == nil || pp.BundledOpenSearchSpec == nil { - return fmt.Errorf("BundledKafkaSpec and BundledOpenSearchSpec structs must not be empty because UseAdvancedVisibility is set to true") - } - if *pp.BundledKafkaSpec != *old[i].BundledKafkaSpec { - return models.ErrImmutablePackagedProvisioning - } - if *pp.BundledOpenSearchSpec != *old[i].BundledOpenSearchSpec { - return models.ErrImmutablePackagedProvisioning - } - } else { - if pp.BundledKafkaSpec != nil || pp.BundledOpenSearchSpec != nil { - return fmt.Errorf("BundledKafkaSpec and BundledOpenSearchSpec structs must be empty because UseAdvancedVisibility is set to false") - } - } - - if *pp.BundledCassandraSpec != *old[i].BundledCassandraSpec || - pp.UseAdvancedVisibility != old[i].UseAdvancedVisibility { + if *pp != *old[i] { return models.ErrImmutablePackagedProvisioning } } @@ -506,119 +491,3 @@ func (sp *StandardProvisioning) validate() error { return nil } - -func (b *BundledKafkaSpec) validate() error { - networkMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(b.Network)) - if !networkMatched || err != nil { - return fmt.Errorf("the provided CIDR: %s must contain four dot separated 
parts and form the Private IP address. All bits in the host part of the CIDR must be 0. Suffix must be between 16-28. %v", b.Network, err) - } - - err = validateReplicationFactor(models.KafkaReplicationFactors, b.ReplicationFactor) - if err != nil { - return err - } - - if ((b.NodesNumber*b.ReplicationFactor)/b.ReplicationFactor)%b.ReplicationFactor != 0 { - return fmt.Errorf("kafka: number of nodes must be a multiple of replication factor: %v", b.ReplicationFactor) - } - - return nil -} - -func (c *BundledCassandraSpec) validate() error { - networkMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(c.Network)) - if !networkMatched || err != nil { - return fmt.Errorf("the provided CIDR: %s must contain four dot separated parts and form the Private IP address. All bits in the host part of the CIDR must be 0. Suffix must be between 16-28. %v", c.Network, err) - } - - err = validateReplicationFactor(models.CassandraReplicationFactors, c.ReplicationFactor) - if err != nil { - return err - } - - if ((c.NodesNumber*c.ReplicationFactor)/c.ReplicationFactor)%c.ReplicationFactor != 0 { - return fmt.Errorf("cassandra: number of nodes must be a multiple of replication factor: %v", c.ReplicationFactor) - } - - return nil -} - -func (o *BundledOpenSearchSpec) validate() error { - networkMatched, err := regexp.Match(models.PeerSubnetsRegExp, []byte(o.Network)) - if !networkMatched || err != nil { - return fmt.Errorf("the provided CIDR: %s must contain four dot separated parts and form the Private IP address. All bits in the host part of the CIDR must be 0. Suffix must be between 16-28. %v", o.Network, err) - } - - err = validateOpenSearchNumberOfRacks(o.NumberOfRacks) - if err != nil { - return err - } - - return nil -} - -func (cs *CadenceSpec) validatePackagedProvisioningCreation() error { - for _, dc := range cs.DataCentres { - for _, pp := range cs.PackagedProvisioning { - if (pp.UseAdvancedVisibility && pp.BundledKafkaSpec == nil) || (pp.UseAdvancedVisibility && pp.BundledOpenSearchSpec == nil) { - return fmt.Errorf("BundledKafkaSpec and BundledOpenSearchSpec structs must not be empty because UseAdvancedVisibility is set to true") - } - - if pp.BundledKafkaSpec != nil { - err := pp.BundledKafkaSpec.validate() - if err != nil { - return err - } - - err = dc.validateNetwork(pp.BundledKafkaSpec.Network) - if err != nil { - return err - } - } - - if pp.BundledCassandraSpec != nil { - err := pp.BundledCassandraSpec.validate() - if err != nil { - return err - } - - err = dc.validateNetwork(pp.BundledCassandraSpec.Network) - if err != nil { - return err - } - } - - if pp.BundledOpenSearchSpec != nil { - err := pp.BundledOpenSearchSpec.validate() - if err != nil { - return err - } - - err = dc.validateNetwork(pp.BundledOpenSearchSpec.Network) - if err != nil { - return err - } - } - } - } - - return nil -} - -func (cdc *CadenceDataCentre) validateNetwork(network string) error { - _, ipnet, err := net.ParseCIDR(cdc.Network) - if err != nil { - return err - } - - ip, _, err := net.ParseCIDR(network) - if err != nil { - return err - } - - if ipnet.Contains(ip) { - return fmt.Errorf("cluster network %s overlaps with network %s", cdc.Network, network) - } - - return nil -} diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go index 7e0a4ec1..67f87707 100644 --- a/apis/clusters/v1beta1/cassandra_webhook.go +++ b/apis/clusters/v1beta1/cassandra_webhook.go @@ -171,10 +171,6 @@ func (cv *cassandraValidator) ValidateUpdate(ctx context.Context, old runtime.Ob 
return models.ErrTypeAssertion } - if oldCluster.Spec.BundledUseOnly && c.Generation != oldCluster.Generation { - return models.ErrBundledUseOnlyResourceUpdateIsNotSupported - } - if oldCluster.Spec.RestoreFrom != nil { return nil } diff --git a/apis/clusters/v1beta1/kafka_webhook.go b/apis/clusters/v1beta1/kafka_webhook.go index 7564d30a..ee558d33 100644 --- a/apis/clusters/v1beta1/kafka_webhook.go +++ b/apis/clusters/v1beta1/kafka_webhook.go @@ -191,10 +191,6 @@ func (kv *kafkaValidator) ValidateUpdate(ctx context.Context, old runtime.Object return fmt.Errorf("cannot assert object %v to Kafka", old.GetObjectKind()) } - if oldKafka.Spec.BundledUseOnly && k.Generation != oldKafka.Generation { - return models.ErrBundledUseOnlyResourceUpdateIsNotSupported - } - err := k.Spec.validateUpdate(&oldKafka.Spec) if err != nil { return fmt.Errorf("cannot update, error: %v", err) diff --git a/apis/clusters/v1beta1/opensearch_webhook.go b/apis/clusters/v1beta1/opensearch_webhook.go index 54293fd7..fd94e495 100644 --- a/apis/clusters/v1beta1/opensearch_webhook.go +++ b/apis/clusters/v1beta1/opensearch_webhook.go @@ -194,10 +194,6 @@ func (osv *openSearchValidator) ValidateUpdate(ctx context.Context, old runtime. oldCluster := old.(*OpenSearch) - if oldCluster.Spec.BundledUseOnly && !oldCluster.Spec.IsEqual(os.Spec) { - return models.ErrBundledUseOnlyResourceUpdateIsNotSupported - } - if oldCluster.Spec.RestoreFrom != nil { return nil } diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index f27248f8..5356121e 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -20,7 +20,9 @@ import ( "context" "errors" "fmt" + "net" "regexp" + "strconv" "strings" k8sappsv1 "k8s.io/api/apps/v1" @@ -459,3 +461,20 @@ func (s *GenericDataCentreSpec) validateCloudProviderSettings() error { func (s *GenericDataCentreSpec) hasCloudProviderSettings() bool { return s.AWSSettings != nil || s.GCPSettings != nil && s.AzureSettings != nil } + +func validateNetwork(network string) error { + ip, _, err := net.ParseCIDR(network) + if err != nil { + return err + } + + ipParts := strings.Split(ip.String(), ".") + secondOctet, err := strconv.Atoi(ipParts[1]) + if err != nil { + return err + } + if secondOctet > 251 || secondOctet < 1 { + return models.ErrInvalidCIDR + } + return nil +} diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index 0fc30195..405052a7 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -367,7 +367,7 @@ func (in *CadenceSpec) DeepCopyInto(out *CadenceSpec) { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] *out = new(PackagedProvisioning) - (*in).DeepCopyInto(*out) + **out = **in } } } @@ -409,6 +409,17 @@ func (in *CadenceSpec) DeepCopy() *CadenceSpec { func (in *CadenceStatus) DeepCopyInto(out *CadenceStatus) { *out = *in in.GenericStatus.DeepCopyInto(&out.GenericStatus) + if in.PackagedProvisioningClusterRefs != nil { + in, out := &in.PackagedProvisioningClusterRefs, &out.PackagedProvisioningClusterRefs + *out = make([]*apiextensions.ObjectReference, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(apiextensions.ObjectReference) + **out = **in + } + } + } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*CadenceDataCentreStatus, len(*in)) @@ -2138,21 +2149,6 @@ func (in *Options) DeepCopy() *Options { // DeepCopyInto 
is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PackagedProvisioning) DeepCopyInto(out *PackagedProvisioning) { *out = *in - if in.BundledKafkaSpec != nil { - in, out := &in.BundledKafkaSpec, &out.BundledKafkaSpec - *out = new(BundledKafkaSpec) - **out = **in - } - if in.BundledOpenSearchSpec != nil { - in, out := &in.BundledOpenSearchSpec, &out.BundledOpenSearchSpec - *out = new(BundledOpenSearchSpec) - **out = **in - } - if in.BundledCassandraSpec != nil { - in, out := &in.BundledCassandraSpec, &out.BundledCassandraSpec - *out = new(BundledCassandraSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PackagedProvisioning. diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 08c1a729..52f72485 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -223,64 +223,16 @@ spec: packagedProvisioning: items: properties: - bundledCassandraSpec: - properties: - network: - type: string - nodeSize: - type: string - nodesNumber: - type: integer - passwordAndUserAuth: - type: boolean - privateIPBroadcastForDiscovery: - type: boolean - replicationFactor: - type: integer - required: - - network - - nodeSize - - nodesNumber - - passwordAndUserAuth - - privateIPBroadcastForDiscovery - - replicationFactor - type: object - bundledKafkaSpec: - properties: - network: - type: string - nodeSize: - type: string - nodesNumber: - type: integer - partitionsNumber: - type: integer - replicationFactor: - type: integer - required: - - network - - nodeSize - - nodesNumber - - partitionsNumber - - replicationFactor - type: object - bundledOpenSearchSpec: - properties: - network: - type: string - nodeSize: - type: string - numberOfRacks: - type: integer - required: - - network - - nodeSize - - numberOfRacks - type: object + solutionSize: + enum: + - Developer + - Production-Starter + - Production-Small + type: string useAdvancedVisibility: type: boolean required: - - bundledCassandraSpec + - solutionSize - useAdvancedVisibility type: object maxItems: 1 @@ -594,6 +546,19 @@ spec: type: array nodeCount: type: string + packagedProvisioningClusterRefs: + items: + description: ObjectReference is namespaced reference to an object + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array state: type: string targetSecondaryCadence: diff --git a/config/samples/clusters_v1beta1_cadence.yaml b/config/samples/clusters_v1beta1_cadence.yaml index 73b417db..cf965f5f 100644 --- a/config/samples/clusters_v1beta1_cadence.yaml +++ b/config/samples/clusters_v1beta1_cadence.yaml @@ -18,30 +18,14 @@ spec: # dependencyCdcId: "9d43ac54-7317-4ce5-859a-e9d0443508a4" # dependencyVpcType: "VPC_PEERED" packagedProvisioning: - - bundledCassandraSpec: - nodeSize: "CAS-DEV-t4g.small-5" - network: "10.2.0.0/16" - replicationFactor: 3 - nodesNumber: 3 - privateIPBroadcastForDiscovery: false - passwordAndUserAuth: true - useAdvancedVisibility: true - bundledKafkaSpec: - nodeSize: "KFK-DEV-t4g.small-5" - nodesNumber: 3 - network: "10.3.0.0/16" - replicationFactor: 3 - partitionsNumber: 3 - bundledOpenSearchSpec: - nodeSize: "SRH-DEV-t4g.small-5" - numberOfRacks: 3 - network: "10.4.0.0/16" + - useAdvancedVisibility: true + solutionSize: "Production-Small" # twoFactorDelete: # - email: "rostyslp@netapp.com" - 
privateNetworkCluster: false + privateNetwork: false dataCentres: - region: "US_EAST_1" - network: "10.12.0.0/16" + network: "10.251.0.0/16" # In a multi-region mode setup, ensure the CIDR block for the secondary cluster does not overlap with the primary one # network: "10.16.0.0/16" cloudProvider: "AWS_VPC" diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index 87251384..44f0405f 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "github.com/go-logr/logr" k8serrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -124,9 +123,8 @@ func (r *CadenceReconciler) handleCreateCluster( ) (ctrl.Result, error) { if c.Status.ID == "" { patch := c.NewPatch() - - for _, packagedProvisioning := range c.Spec.PackagedProvisioning { - requeueNeeded, err := r.preparePackagedSolution(ctx, c, packagedProvisioning) + for _, pp := range c.Spec.PackagedProvisioning { + requeueNeeded, err := r.reconcilePackagedProvisioning(ctx, c, pp.UseAdvancedVisibility) if err != nil { l.Error(err, "Cannot prepare packaged solution for Cadence cluster", "cluster name", c.Spec.Name, @@ -408,22 +406,6 @@ func (r *CadenceReconciler) handleDeleteCluster( "cluster name", c.Spec.Name, "cluster status", c.Status) - for _, packagedProvisioning := range c.Spec.PackagedProvisioning { - err = r.deletePackagedResources(ctx, c, packagedProvisioning) - if err != nil { - l.Error( - err, "Cannot delete Cadence packaged resources", - "cluster name", c.Spec.Name, - "cluster ID", c.Status.ID, - ) - - r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, - "Cannot delete Cadence packaged resources. Reason: %v", err) - - return ctrl.Result{}, err - } - } - r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) patch := c.NewPatch() controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) @@ -458,84 +440,43 @@ func (r *CadenceReconciler) handleDeleteCluster( return ctrl.Result{}, nil } -func (r *CadenceReconciler) preparePackagedSolution( +func (r *CadenceReconciler) reconcilePackagedProvisioning( ctx context.Context, c *v1beta1.Cadence, - packagedProvisioning *v1beta1.PackagedProvisioning, + useAdvancedVisibility bool, ) (bool, error) { - if len(c.Spec.DataCentres) < 1 { - return false, models.ErrZeroDataCentres - } - - labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name) - selector, err := labels.Parse(labelsToQuery) - if err != nil { - return false, err - } - - cassandraList := &v1beta1.CassandraList{} - err = r.Client.List(ctx, cassandraList, &client.ListOptions{LabelSelector: selector}) - if err != nil { - return false, err - } - - if len(cassandraList.Items) == 0 { - appVersions, err := r.API.ListAppVersions(models.CassandraAppKind) - if err != nil { - return false, fmt.Errorf("cannot list versions for kind: %v, err: %w", - models.CassandraAppKind, err) - } - - cassandraVersions := getSortedAppVersions(appVersions, models.CassandraAppType) - if len(cassandraVersions) == 0 { - return false, fmt.Errorf("there are no versions for %v kind", - models.CassandraAppKind) - } - - cassandraSpec, err := r.newCassandraSpec(c, cassandraVersions[len(cassandraVersions)-1].String()) + for _, dc := range c.Spec.DataCentres { + availableCIDRs, err := CalculateAvailableNetworks(dc.Network) if err != nil { return false, err } - err = r.Client.Create(ctx, cassandraSpec) + labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, 
c.Name) + selector, err := labels.Parse(labelsToQuery) if err != nil { return false, err } - return true, nil - } - - kafkaList := &v1beta1.KafkaList{} - osList := &v1beta1.OpenSearchList{} - advancedVisibility := &v1beta1.AdvancedVisibility{ - TargetKafka: &v1beta1.CadenceDependencyTarget{}, - TargetOpenSearch: &v1beta1.CadenceDependencyTarget{}, - } - var advancedVisibilities []*v1beta1.AdvancedVisibility - if packagedProvisioning.UseAdvancedVisibility { - err = r.Client.List(ctx, kafkaList, &client.ListOptions{LabelSelector: selector}) + cassandraList := &v1beta1.CassandraList{} + err = r.Client.List(ctx, cassandraList, &client.ListOptions{ + LabelSelector: selector, + }) if err != nil { return false, err } - if len(kafkaList.Items) == 0 { - appVersions, err := r.API.ListAppVersions(models.KafkaAppKind) - if err != nil { - return false, fmt.Errorf("cannot list versions for kind: %v, err: %w", - models.KafkaAppKind, err) - } - kafkaVersions := getSortedAppVersions(appVersions, models.KafkaAppType) - if len(kafkaVersions) == 0 { - return false, fmt.Errorf("there are no versions for %v kind", - models.KafkaAppType) + if len(cassandraList.Items) == 0 { + version, err := r.reconcileAppVersion(models.CassandraAppKind, models.CassandraAppType) + if err != nil { + return false, err } - kafkaSpec, err := r.newKafkaSpec(c, kafkaVersions[len(kafkaVersions)-1].String()) + cassandraSpec, err := r.newCassandraSpec(c, version, availableCIDRs) if err != nil { return false, err } - err = r.Client.Create(ctx, kafkaSpec) + err = r.Client.Create(ctx, cassandraSpec) if err != nil { return false, err } @@ -543,147 +484,352 @@ func (r *CadenceReconciler) preparePackagedSolution( return true, nil } - if len(kafkaList.Items[0].Status.DataCentres) == 0 { - return true, nil - } + var advancedVisibility []*v1beta1.AdvancedVisibility + var clusterRefs []*v1beta1.Reference - advancedVisibility.TargetKafka.DependencyCDCID = kafkaList.Items[0].Status.DataCentres[0].ID - advancedVisibility.TargetKafka.DependencyVPCType = models.VPCPeered + if useAdvancedVisibility { + av := &v1beta1.AdvancedVisibility{ + TargetKafka: &v1beta1.CadenceDependencyTarget{}, + TargetOpenSearch: &v1beta1.CadenceDependencyTarget{}, + } - err = r.Client.List(ctx, osList, &client.ListOptions{LabelSelector: selector}) - if err != nil { - return false, err - } - if len(osList.Items) == 0 { - appVersions, err := r.API.ListAppVersions(models.OpenSearchAppKind) + kafkaList := &v1beta1.KafkaList{} + err = r.Client.List(ctx, kafkaList, &client.ListOptions{LabelSelector: selector}) if err != nil { - return false, fmt.Errorf("cannot list versions for kind: %v, err: %w", - models.OpenSearchAppKind, err) + return false, err + } + + if len(kafkaList.Items) == 0 { + version, err := r.reconcileAppVersion(models.KafkaAppKind, models.KafkaAppType) + if err != nil { + return false, err + } + + kafkaSpec, err := r.newKafkaSpec(c, version, availableCIDRs) + if err != nil { + return false, err + } + + err = r.Client.Create(ctx, kafkaSpec) + if err != nil { + return false, err + } + + return true, nil } - openSearchVersions := getSortedAppVersions(appVersions, models.OpenSearchAppType) - if len(openSearchVersions) == 0 { - return false, fmt.Errorf("there are no versions for %v kind", - models.OpenSearchAppType) + if len(kafkaList.Items[0].Status.DataCentres) == 0 { + return true, nil } - // For OpenSearch we cannot use the latest version because is not supported by Cadence. So we use the oldest one. 
- osSpec, err := r.newOpenSearchSpec(c, openSearchVersions[0].String()) - if err != nil { - return false, err + av.TargetKafka.DependencyCDCID = kafkaList.Items[0].Status.DataCentres[0].ID + av.TargetKafka.DependencyVPCType = models.VPCPeered + + ref := &v1beta1.Reference{ + Name: kafkaList.Items[0].Name, + Namespace: kafkaList.Items[0].Namespace, } + clusterRefs = append(clusterRefs, ref) - err = r.Client.Create(ctx, osSpec) + osList := &v1beta1.OpenSearchList{} + err = r.Client.List(ctx, osList, &client.ListOptions{LabelSelector: selector}) if err != nil { return false, err } - return true, nil + if len(osList.Items) == 0 { + version, err := r.reconcileAppVersion(models.OpenSearchAppKind, models.OpenSearchAppType) + if err != nil { + return false, err + } + + osSpec, err := r.newOpenSearchSpec(c, version, availableCIDRs) + if err != nil { + return false, err + } + + err = r.Client.Create(ctx, osSpec) + if err != nil { + return false, err + } + + return true, nil + } + + if len(osList.Items[0].Status.DataCentres) == 0 { + return true, nil + } + + ref = &v1beta1.Reference{ + Name: osList.Items[0].Name, + Namespace: osList.Items[0].Namespace, + } + clusterRefs = append(clusterRefs, ref) + + av.TargetOpenSearch.DependencyCDCID = osList.Items[0].Status.DataCentres[0].ID + av.TargetOpenSearch.DependencyVPCType = models.VPCPeered + advancedVisibility = append(advancedVisibility, av) } - if len(osList.Items[0].Status.DataCentres) == 0 { + if len(cassandraList.Items[0].Status.DataCentres) == 0 { return true, nil } - advancedVisibility.TargetOpenSearch.DependencyCDCID = osList.Items[0].Status.DataCentres[0].ID - advancedVisibility.TargetOpenSearch.DependencyVPCType = models.VPCPeered - advancedVisibilities = append(advancedVisibilities, advancedVisibility) + ref := &v1beta1.Reference{ + Name: cassandraList.Items[0].Name, + Namespace: cassandraList.Items[0].Namespace, + } + clusterRefs = append(clusterRefs, ref) + c.Status.PackagedProvisioningClusterRefs = clusterRefs + + c.Spec.StandardProvisioning = append(c.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{ + AdvancedVisibility: advancedVisibility, + TargetCassandra: &v1beta1.CadenceDependencyTarget{ + DependencyCDCID: cassandraList.Items[0].Status.DataCentres[0].ID, + DependencyVPCType: models.VPCPeered, + }, + }) + + } + + return false, nil +} + +func (r *CadenceReconciler) newCassandraSpec( + c *v1beta1.Cadence, + version string, + availableCIDRs []string, +) (*v1beta1.Cassandra, error) { + typeMeta := r.reconcileTypeMeta(models.CassandraAppKind) + metadata := r.reconcileMetadata(c, models.CassandraAppKind) + dcs, err := r.reconcileAppDCs(c, availableCIDRs, models.CassandraAppKind) + if err != nil { + return nil, err } - if len(cassandraList.Items[0].Status.DataCentres) == 0 { - return true, nil + cassandraDCs, ok := dcs.([]*v1beta1.CassandraDataCentre) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", []*v1beta1.CassandraDataCentre(nil), cassandraDCs) } - c.Spec.StandardProvisioning = append(c.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{ - AdvancedVisibility: advancedVisibilities, - TargetCassandra: &v1beta1.CadenceDependencyTarget{ - DependencyCDCID: cassandraList.Items[0].Status.DataCentres[0].ID, - DependencyVPCType: models.VPCPeered, - }, - }) + spec := r.reconcileAppSpec(c, models.CassandraAppKind, version) + cassandraSpec, ok := spec.(v1beta1.CassandraSpec) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", v1beta1.CassandraSpec{}, spec) + } - return false, nil + 
cassandraSpec.DataCentres = cassandraDCs + + return &v1beta1.Cassandra{ + TypeMeta: typeMeta, + ObjectMeta: metadata, + Spec: cassandraSpec, + }, nil } -func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandraVersion string) (*v1beta1.Cassandra, error) { +func (r *CadenceReconciler) reconcileTypeMeta(app string) v1.TypeMeta { typeMeta := v1.TypeMeta{ - Kind: models.CassandraKind, APIVersion: models.ClustersV1beta1APIVersion, } + switch app { + case models.CassandraAppKind: + typeMeta.Kind = models.CassandraKind + case models.KafkaAppKind: + typeMeta.Kind = models.KafkaKind + case models.OpenSearchAppKind: + typeMeta.Kind = models.OpenSearchKind + } + + return typeMeta +} + +func (r *CadenceReconciler) reconcileMetadata(c *v1beta1.Cadence, app string) v1.ObjectMeta { metadata := v1.ObjectMeta{ - Name: models.CassandraChildPrefix + c.Name, Labels: map[string]string{models.ControlledByLabel: c.Name}, Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, Namespace: c.ObjectMeta.Namespace, Finalizers: []string{}, } - slaTier := c.Spec.SLATier - privateNetwork := c.Spec.PrivateNetwork - pciCompliance := c.Spec.PCICompliance + switch app { + case models.CassandraAppKind: + metadata.Name = models.CassandraChildPrefix + c.Name + case models.KafkaAppKind: + metadata.Name = models.KafkaChildPrefix + c.Name + case models.OpenSearchAppKind: + metadata.Name = models.OpenSearchChildPrefix + c.Name + } - var twoFactorDelete []*v1beta1.TwoFactorDelete - if len(c.Spec.TwoFactorDelete) > 0 { - twoFactorDelete = []*v1beta1.TwoFactorDelete{ + return metadata +} + +func (r *CadenceReconciler) reconcileAppDCs(c *v1beta1.Cadence, availableCIDRs []string, app string) (any, error) { + network, err := r.reconcileNetwork(availableCIDRs, app) + if err != nil { + return nil, err + } + + genericDataCentreSpec := v1beta1.GenericDataCentreSpec{ + Region: c.Spec.DataCentres[0].Region, + CloudProvider: c.Spec.DataCentres[0].CloudProvider, + ProviderAccountName: c.Spec.DataCentres[0].ProviderAccountName, + Network: network, + } + + switch app { + case models.CassandraAppKind: + var privateIPBroadcast bool + if c.Spec.PrivateNetwork { + privateIPBroadcast = true + } + + genericDataCentreSpec.Name = models.CassandraChildDCName + cassandraDCs := []*v1beta1.CassandraDataCentre{ + { + GenericDataCentreSpec: genericDataCentreSpec, + NodeSize: c.Spec.CalculateNodeSize( + c.Spec.DataCentres[0].CloudProvider, + c.Spec.PackagedProvisioning[0].SolutionSize, + models.CassandraAppKind, + ), + NodesNumber: 3, + ReplicationFactor: 3, + PrivateIPBroadcastForDiscovery: privateIPBroadcast, + }, + } + + return cassandraDCs, nil + + case models.KafkaAppKind: + genericDataCentreSpec.Name = models.KafkaChildDCName + kafkaDCs := []*v1beta1.KafkaDataCentre{ + { + GenericDataCentreSpec: genericDataCentreSpec, + NodeSize: c.Spec.CalculateNodeSize( + c.Spec.DataCentres[0].CloudProvider, + c.Spec.PackagedProvisioning[0].SolutionSize, + models.KafkaAppKind, + ), + NodesNumber: 3, + }, + } + return kafkaDCs, nil + + case models.OpenSearchAppKind: + genericDataCentreSpec.Name = models.OpenSearchChildDCName + openSearchDCs := []*v1beta1.OpenSearchDataCentre{ { - Email: c.Spec.TwoFactorDelete[0].Email, - Phone: c.Spec.TwoFactorDelete[0].Phone, + GenericDataCentreSpec: genericDataCentreSpec, + NumberOfRacks: 3, }, } + return openSearchDCs, nil + } + + return nil, nil +} + +func (r *CadenceReconciler) reconcileAppSpec(c *v1beta1.Cadence, app, version string) any { + var twoFactorDelete 
[]*v1beta1.TwoFactorDelete + for _, cadenceTFD := range c.Spec.TwoFactorDelete { + tfd := &v1beta1.TwoFactorDelete{ + Email: cadenceTFD.Email, + Phone: cadenceTFD.Phone, + } + twoFactorDelete = append(twoFactorDelete, tfd) } - var cassNodeSize, network string - var cassNodesNumber, cassReplicationFactor int - var cassPrivateIPBroadcastForDiscovery, cassPasswordAndUserAuth bool - for _, pp := range c.Spec.PackagedProvisioning { - cassNodeSize = pp.BundledCassandraSpec.NodeSize - network = pp.BundledCassandraSpec.Network - cassNodesNumber = pp.BundledCassandraSpec.NodesNumber - cassReplicationFactor = pp.BundledCassandraSpec.ReplicationFactor - cassPrivateIPBroadcastForDiscovery = pp.BundledCassandraSpec.PrivateIPBroadcastForDiscovery - cassPasswordAndUserAuth = pp.BundledCassandraSpec.PasswordAndUserAuth + + genericClusterSpec := v1beta1.GenericClusterSpec{ + Version: version, + SLATier: c.Spec.SLATier, + PrivateNetwork: c.Spec.PrivateNetwork, + TwoFactorDelete: twoFactorDelete, } - dcName := models.CassandraChildDCName - dcRegion := c.Spec.DataCentres[0].Region - cloudProvider := c.Spec.DataCentres[0].CloudProvider - providerAccountName := c.Spec.DataCentres[0].ProviderAccountName - - cassandraDataCentres := []*v1beta1.CassandraDataCentre{ - { - GenericDataCentreSpec: v1beta1.GenericDataCentreSpec{ - Name: dcName, - Region: dcRegion, - CloudProvider: cloudProvider, - ProviderAccountName: providerAccountName, - Network: network, - }, - NodeSize: cassNodeSize, - NodesNumber: cassNodesNumber, - ReplicationFactor: cassReplicationFactor, - PrivateIPBroadcastForDiscovery: cassPrivateIPBroadcastForDiscovery, - }, + switch app { + case models.CassandraAppKind: + genericClusterSpec.Name = models.CassandraChildPrefix + c.Name + spec := v1beta1.CassandraSpec{ + GenericClusterSpec: genericClusterSpec, + PasswordAndUserAuth: true, + PCICompliance: c.Spec.PCICompliance, + BundledUseOnly: true, + } + return spec + + case models.KafkaAppKind: + genericClusterSpec.Name = models.KafkaChildPrefix + c.Name + spec := v1beta1.KafkaSpec{ + GenericClusterSpec: genericClusterSpec, + PCICompliance: c.Spec.PCICompliance, + BundledUseOnly: true, + ReplicationFactor: 3, + PartitionsNumber: 3, + AllowDeleteTopics: true, + AutoCreateTopics: true, + ClientToClusterEncryption: c.Spec.DataCentres[0].ClientEncryption, + } + + return spec + + case models.OpenSearchAppKind: + managerNodes := []*v1beta1.ClusterManagerNodes{{ + NodeSize: c.Spec.CalculateNodeSize( + c.Spec.DataCentres[0].CloudProvider, + c.Spec.PackagedProvisioning[0].SolutionSize, + models.OpenSearchAppKind, + ), + DedicatedManager: false, + }} + + genericClusterSpec.Name = models.OpenSearchChildPrefix + c.Name + spec := v1beta1.OpenSearchSpec{ + GenericClusterSpec: genericClusterSpec, + PCICompliance: c.Spec.PCICompliance, + BundledUseOnly: true, + ClusterManagerNodes: managerNodes, + } + + return spec } - spec := v1beta1.CassandraSpec{ - GenericClusterSpec: v1beta1.GenericClusterSpec{ - Name: models.CassandraChildPrefix + c.Name, - Version: latestCassandraVersion, - SLATier: slaTier, - PrivateNetwork: privateNetwork, - TwoFactorDelete: twoFactorDelete, - }, - DataCentres: cassandraDataCentres, - PasswordAndUserAuth: cassPasswordAndUserAuth, - PCICompliance: pciCompliance, - BundledUseOnly: true, + return nil +} + +func (r *CadenceReconciler) reconcileNetwork(availableCIDRs []string, app string) (string, error) { + switch app { + case models.CassandraAppKind: + return availableCIDRs[1], nil + case models.KafkaAppKind: + return availableCIDRs[2], nil + case 
models.OpenSearchAppKind: + return availableCIDRs[3], nil } - return &v1beta1.Cassandra{ - TypeMeta: typeMeta, - ObjectMeta: metadata, - Spec: spec, - }, nil + return "", nil +} + +func (r *CadenceReconciler) reconcileAppVersion(appKind, appType string) (string, error) { + appVersions, err := r.API.ListAppVersions(appKind) + if err != nil { + return "", err + } + + sortedAppVersions := getSortedAppVersions(appVersions, appType) + if len(sortedAppVersions) == 0 { + return "", err + } + + switch appType { + case models.CassandraAppType, models.KafkaAppType: + return sortedAppVersions[len(sortedAppVersions)-1].String(), nil + + case models.OpenSearchAppType: + return sortedAppVersions[0].String(), nil + } + + return "", nil } func (r *CadenceReconciler) startSyncJob(c *v1beta1.Cadence) error { @@ -843,222 +989,70 @@ func (r *CadenceReconciler) newSyncJob(c *v1beta1.Cadence) scheduler.Job { } } -func (r *CadenceReconciler) newKafkaSpec(c *v1beta1.Cadence, latestKafkaVersion string) (*v1beta1.Kafka, error) { - typeMeta := v1.TypeMeta{ - Kind: models.KafkaKind, - APIVersion: models.ClustersV1beta1APIVersion, - } - - metadata := v1.ObjectMeta{ - Name: models.KafkaChildPrefix + c.Name, - Labels: map[string]string{models.ControlledByLabel: c.Name}, - Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, - Namespace: c.ObjectMeta.Namespace, - Finalizers: []string{}, +func (r *CadenceReconciler) newKafkaSpec( + c *v1beta1.Cadence, + version string, + availableCIDRs []string, +) (*v1beta1.Kafka, error) { + typeMeta := r.reconcileTypeMeta(models.KafkaAppKind) + metadata := r.reconcileMetadata(c, models.KafkaAppKind) + dcs, err := r.reconcileAppDCs(c, availableCIDRs, models.KafkaAppKind) + if err != nil { + return nil, err } - if len(c.Spec.DataCentres) == 0 { - return nil, models.ErrZeroDataCentres + kafkaDCs, ok := dcs.([]*v1beta1.KafkaDataCentre) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", []*v1beta1.KafkaDataCentre(nil), kafkaDCs) } - var kafkaTFD []*v1beta1.TwoFactorDelete - for _, cadenceTFD := range c.Spec.TwoFactorDelete { - twoFactorDelete := &v1beta1.TwoFactorDelete{ - Email: cadenceTFD.Email, - Phone: cadenceTFD.Phone, - } - kafkaTFD = append(kafkaTFD, twoFactorDelete) - } - bundledKafkaSpec := c.Spec.PackagedProvisioning[0].BundledKafkaSpec - - kafkaNetwork := bundledKafkaSpec.Network - kafkaNodeSize := bundledKafkaSpec.NodeSize - kafkaNodesNumber := bundledKafkaSpec.NodesNumber - dcName := models.KafkaChildDCName - dcRegion := c.Spec.DataCentres[0].Region - cloudProvider := c.Spec.DataCentres[0].CloudProvider - providerAccountName := c.Spec.DataCentres[0].ProviderAccountName - kafkaDataCentres := []*v1beta1.KafkaDataCentre{ - { - GenericDataCentreSpec: v1beta1.GenericDataCentreSpec{ - Name: dcName, - Region: dcRegion, - CloudProvider: cloudProvider, - ProviderAccountName: providerAccountName, - Network: kafkaNetwork, - }, - NodeSize: kafkaNodeSize, - NodesNumber: kafkaNodesNumber, - }, + spec := r.reconcileAppSpec(c, models.KafkaAppKind, version) + kafkaSpec, ok := spec.(v1beta1.KafkaSpec) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", v1beta1.KafkaSpec{}, spec) } - slaTier := c.Spec.SLATier - privateClusterNetwork := c.Spec.PrivateNetwork - pciCompliance := c.Spec.PCICompliance - clientEncryption := c.Spec.DataCentres[0].ClientEncryption - spec := v1beta1.KafkaSpec{ - GenericClusterSpec: v1beta1.GenericClusterSpec{ - Name: models.KafkaChildPrefix + c.Name, - Version: latestKafkaVersion, - SLATier: 
slaTier, - PrivateNetwork: privateClusterNetwork, - TwoFactorDelete: kafkaTFD, - }, - DataCentres: kafkaDataCentres, - ReplicationFactor: bundledKafkaSpec.ReplicationFactor, - PartitionsNumber: bundledKafkaSpec.PartitionsNumber, - AllowDeleteTopics: true, - AutoCreateTopics: true, - ClientToClusterEncryption: clientEncryption, - BundledUseOnly: true, - PCICompliance: pciCompliance, - } + kafkaSpec.DataCentres = kafkaDCs return &v1beta1.Kafka{ TypeMeta: typeMeta, ObjectMeta: metadata, - Spec: spec, + Spec: kafkaSpec, }, nil } -func (r *CadenceReconciler) newOpenSearchSpec(c *v1beta1.Cadence, oldestOpenSearchVersion string) (*v1beta1.OpenSearch, error) { - typeMeta := v1.TypeMeta{ - Kind: models.OpenSearchKind, - APIVersion: models.ClustersV1beta1APIVersion, - } - - metadata := v1.ObjectMeta{ - Name: models.OpenSearchChildPrefix + c.Name, - Labels: map[string]string{models.ControlledByLabel: c.Name}, - Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, - Namespace: c.ObjectMeta.Namespace, - Finalizers: []string{}, +func (r *CadenceReconciler) newOpenSearchSpec( + c *v1beta1.Cadence, + version string, + availableCIDRs []string, +) (*v1beta1.OpenSearch, error) { + typeMeta := r.reconcileTypeMeta(models.OpenSearchAppKind) + metadata := r.reconcileMetadata(c, models.OpenSearchAppKind) + dcs, err := r.reconcileAppDCs(c, availableCIDRs, models.OpenSearchAppKind) + if err != nil { + return nil, err } - if len(c.Spec.DataCentres) < 1 { - return nil, models.ErrZeroDataCentres + osDCs, ok := dcs.([]*v1beta1.OpenSearchDataCentre) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", []*v1beta1.OpenSearchDataCentre(nil), osDCs) } - bundledOpenSearchSpec := c.Spec.PackagedProvisioning[0].BundledOpenSearchSpec - - managerNodes := []*v1beta1.ClusterManagerNodes{{ - NodeSize: bundledOpenSearchSpec.NodeSize, - DedicatedManager: false, - }} - - oNumberOfRacks := bundledOpenSearchSpec.NumberOfRacks - slaTier := c.Spec.SLATier - privateClusterNetwork := c.Spec.PrivateNetwork - pciCompliance := c.Spec.PCICompliance - - var twoFactorDelete []*v1beta1.TwoFactorDelete - if len(c.Spec.TwoFactorDelete) > 0 { - twoFactorDelete = []*v1beta1.TwoFactorDelete{ - { - Email: c.Spec.TwoFactorDelete[0].Email, - Phone: c.Spec.TwoFactorDelete[0].Phone, - }, - } + spec := r.reconcileAppSpec(c, models.OpenSearchAppKind, version) + osSpec, ok := spec.(v1beta1.OpenSearchSpec) + if !ok { + return nil, fmt.Errorf("resource is not of type %T, got %T", v1beta1.OpenSearchSpec{}, spec) } - osNetwork := bundledOpenSearchSpec.Network - dcName := models.OpenSearchChildDCName - dcRegion := c.Spec.DataCentres[0].Region - cloudProvider := c.Spec.DataCentres[0].CloudProvider - providerAccountName := c.Spec.DataCentres[0].ProviderAccountName - - osDataCentres := []*v1beta1.OpenSearchDataCentre{ - { - GenericDataCentreSpec: v1beta1.GenericDataCentreSpec{ - Name: dcName, - Region: dcRegion, - CloudProvider: cloudProvider, - ProviderAccountName: providerAccountName, - Network: osNetwork, - }, - NumberOfRacks: oNumberOfRacks, - }, - } - spec := v1beta1.OpenSearchSpec{ - GenericClusterSpec: v1beta1.GenericClusterSpec{ - Name: models.OpenSearchChildPrefix + c.Name, - Version: oldestOpenSearchVersion, - SLATier: slaTier, - PrivateNetwork: privateClusterNetwork, - TwoFactorDelete: twoFactorDelete, - }, - DataCentres: osDataCentres, - ClusterManagerNodes: managerNodes, - BundledUseOnly: true, - PCICompliance: pciCompliance, - } + osSpec.DataCentres = osDCs return &v1beta1.OpenSearch{ TypeMeta: 
typeMeta, ObjectMeta: metadata, - Spec: spec, + Spec: osSpec, }, nil } -func (r *CadenceReconciler) deletePackagedResources( - ctx context.Context, - c *v1beta1.Cadence, - packagedProvisioning *v1beta1.PackagedProvisioning, -) error { - labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name) - selector, err := labels.Parse(labelsToQuery) - if err != nil { - return err - } - - cassandraList := &v1beta1.CassandraList{} - err = r.Client.List(ctx, cassandraList, &client.ListOptions{LabelSelector: selector}) - if err != nil { - return err - } - - if len(cassandraList.Items) != 0 { - for _, cassandraCluster := range cassandraList.Items { - err = r.Client.Delete(ctx, &cassandraCluster) - if err != nil { - return err - } - } - } - - if packagedProvisioning.UseAdvancedVisibility { - kafkaList := &v1beta1.KafkaList{} - err = r.Client.List(ctx, kafkaList, &client.ListOptions{LabelSelector: selector}) - if err != nil { - return err - } - if len(kafkaList.Items) != 0 { - for _, kafkaCluster := range kafkaList.Items { - err = r.Client.Delete(ctx, &kafkaCluster) - if err != nil { - return err - } - } - } - - osList := &v1beta1.OpenSearchList{} - err = r.Client.List(ctx, osList, &client.ListOptions{LabelSelector: selector}) - if err != nil { - return err - } - if len(osList.Items) != 0 { - for _, osCluster := range osList.Items { - err = r.Client.Delete(ctx, &osCluster) - if err != nil { - return err - } - } - } - } - - return nil -} - // SetupWithManager sets up the controller with the Manager. func (r *CadenceReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). diff --git a/controllers/clusters/cadence_controller_test.go b/controllers/clusters/cadence_controller_test.go index 6e6e94f6..5472eb65 100644 --- a/controllers/clusters/cadence_controller_test.go +++ b/controllers/clusters/cadence_controller_test.go @@ -164,31 +164,6 @@ var _ = Describe("Cadence Controller", func() { c := &v1beta1.Cadence{} g.Expect(k8sClient.Get(ctx, key, c)).ShouldNot(Succeed()) }, timeout, interval).Should(Succeed()) - - opensearch := &v1beta1.OpenSearch{} - kafka := &v1beta1.Kafka{} - cassandra := &v1beta1.Cassandra{} - - Eventually(func() error { - return k8sClient.Get(ctx, types.NamespacedName{ - Namespace: metav1.NamespaceDefault, - Name: models.OpenSearchChildPrefix + cadence.Name}, opensearch, - ) - }, timeout, interval).ShouldNot(Succeed(), "should not get opensearch resource") - - Eventually(func() error { - return k8sClient.Get(ctx, types.NamespacedName{ - Namespace: metav1.NamespaceDefault, - Name: models.CassandraChildPrefix + cadence.Name}, cassandra, - ) - }, timeout, interval).ShouldNot(Succeed(), "should not get cassandra resource") - - Eventually(func() error { - return k8sClient.Get(ctx, types.NamespacedName{ - Namespace: metav1.NamespaceDefault, - Name: models.KafkaChildPrefix + cadence.Name}, kafka, - ) - }, timeout, interval).ShouldNot(Succeed(), "should not get kafka resource") }) }) }) diff --git a/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml b/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml index 6cd7aaff..c8395435 100644 --- a/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml +++ b/controllers/clusters/datatest/cadence_v1beta1_packaged.yaml @@ -21,24 +21,8 @@ spec: # dependencyCdcId: test-cassandra-cdcid # dependencyVpcType: "VPC_PEERED" packagedProvisioning: - - bundledCassandraSpec: - nodeSize: "CAS-DEV-t4g.small-5" - network: "10.2.0.0/16" - replicationFactor: 3 - nodesNumber: 3 - 
privateIPBroadcastForDiscovery: false - passwordAndUserAuth: true + - solutionSize: "Developer" useAdvancedVisibility: true - bundledKafkaSpec: - nodeSize: "KFK-DEV-t4g.small-5" - nodesNumber: 3 - network: "10.3.0.0/16" - replicationFactor: 3 - partitionsNumber: 3 - bundledOpenSearchSpec: - nodeSize: "SRH-DEV-t4g.small-5" - numberOfRacks: 3 - network: "10.4.0.0/16" # twoFactorDelete: # - email: "example@netapp.com" privateNetworkCluster: false diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go index 2380137c..ae5f0024 100644 --- a/controllers/clusters/helpers.go +++ b/controllers/clusters/helpers.go @@ -20,7 +20,10 @@ import ( "context" "encoding/json" "fmt" + "net" "sort" + "strconv" + "strings" "github.com/go-logr/logr" "github.com/hashicorp/go-version" @@ -332,3 +335,36 @@ func reconcileExternalChanges(c client.Client, r record.EventRecorder, obj Objec return nil } + +func CalculateAvailableNetworks(cadenceNetwork string) ([]string, error) { + clustersCIDRs := []string{cadenceNetwork} + for i := 0; i <= 3; i++ { + newCIDR, err := incrementCIDR(clustersCIDRs[i]) + if err != nil { + return nil, err + } + clustersCIDRs = append(clustersCIDRs, newCIDR) + } + + return clustersCIDRs, nil +} + +func incrementCIDR(cidr string) (string, error) { + ip, ipnet, err := net.ParseCIDR(cidr) + if err != nil { + return "", err + } + + ipParts := strings.Split(ip.String(), ".") + secondOctet, err := strconv.Atoi(ipParts[1]) + if err != nil { + return "", err + } + + secondOctet++ + ipParts[1] = strconv.Itoa(secondOctet) + prefixLength, _ := ipnet.Mask.Size() + + incrementedIP := strings.Join(ipParts, ".") + return fmt.Sprintf("%s/%d", incrementedIP, prefixLength), nil +} diff --git a/pkg/models/cadence_apiv2.go b/pkg/models/cadence_apiv2.go index a3d96032..27d22712 100644 --- a/pkg/models/cadence_apiv2.go +++ b/pkg/models/cadence_apiv2.go @@ -21,6 +21,81 @@ const ( AWSSecretAccessKey = "awsSecretAccessKey" ) +// Related to Packaged Provisioning +const ( + DeveloperSize = "Developer" + ProductionStarterSize = "Production-Starter" + ProductionSmallSize = "Production-Small" +) + +var ( + AWSCassandraSizes = map[string]string{ + DeveloperSize: "CAS-DEV-t4g.small-5", + ProductionStarterSize: "CAS-PRD-m6g.large-120", + ProductionSmallSize: "CAS-PRD-r6g.large-800", + } + AzureCassandraSizes = map[string]string{ + DeveloperSize: "Standard_DS2_v2-256-an", + ProductionStarterSize: "Standard_DS2_v2-256-an", + ProductionSmallSize: "Standard_DS12_v2-512-an", + } + GCPCassandraSizes = map[string]string{ + DeveloperSize: "CAS-DEV-n1-standard-1-5", + ProductionStarterSize: "CAS-PRD-n2-standard-2-250", + ProductionSmallSize: "CAS-PRD-n2-highmem-2-400", + } + AWSKafkaSizes = map[string]string{ + DeveloperSize: "KFK-DEV-t4g.small-5", + ProductionStarterSize: "KFK-PRD-r6g.large-250", + ProductionSmallSize: "KFK-PRD-r6g.large-400", + } + AzureKafkaSizes = map[string]string{ + DeveloperSize: "Standard_DS1_v2-32", + ProductionStarterSize: "Standard_DS11_v2-512", + ProductionSmallSize: "Standard_DS11_v2-512", + } + GCPKafkaSizes = map[string]string{ + DeveloperSize: "n1-standard-1-80", + ProductionStarterSize: "n1-highmem-2-400", + ProductionSmallSize: "n1-highmem-2-400", + } + AWSOpenSearchSizes = map[string]string{ + DeveloperSize: "SRH-DEV-t4g.small-5", + ProductionStarterSize: "SRH-PRD-m6g.large-250", + ProductionSmallSize: "SRH-PRD-r6g.large-800", + } + AzureOpenSearchSizes = map[string]string{ + DeveloperSize: "SRH-DEV-DS1_v2-5-an", + ProductionStarterSize: "SRH-PRD-D2s_v5-250-an", + 
ProductionSmallSize: "SRH-PRD-E2s_v4-800-an", + } + GCPOpenSearchSizes = map[string]string{ + DeveloperSize: "SRH-DEV-n1-standard-1-30", + ProductionStarterSize: "SRH-PRD-n2-standard-2-250", + ProductionSmallSize: "SRH-PRD-n2-highmem-2-800", + } +) + +type solutionSizesMap map[string]map[string]map[string]string + +var SolutionSizesMap = solutionSizesMap{ + AWSVPC: { + CassandraAppKind: AWSCassandraSizes, + KafkaAppKind: AWSKafkaSizes, + OpenSearchAppKind: AWSOpenSearchSizes, + }, + AZUREAZ: { + CassandraAppKind: AzureCassandraSizes, + KafkaAppKind: AzureKafkaSizes, + OpenSearchAppKind: AzureOpenSearchSizes, + }, + GCP: { + CassandraAppKind: GCPCassandraSizes, + KafkaAppKind: GCPKafkaSizes, + OpenSearchAppKind: GCPOpenSearchSizes, + }, +} + type CadenceCluster struct { GenericClusterFields `json:",inline"` diff --git a/pkg/models/errors.go b/pkg/models/errors.go index 0fe345f3..791d54f3 100644 --- a/pkg/models/errors.go +++ b/pkg/models/errors.go @@ -78,4 +78,5 @@ var ( ErrClusterIsNotReadyToUpdate = errors.New("cluster is not ready to update") ErrKubeVirtAddonNotFound = errors.New("cannot create KubeVirt based resources automatially without KubeVirt operator installed. Please install KubeVirt add-on") ErrOpenSearchNumberOfRacksInvalid = errors.New("number of racks should be between 2 and 5") + ErrInvalidCIDR = errors.New("invalid Cadence Network for Packaged Provisioning solution. Second octet must be in 1-251 range") )
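
A note on how the new packaged-provisioning pieces in this patch fit together: the webhook's validateNetwork now requires the Cadence data centre CIDR's second octet to be between 1 and 251 because CalculateAvailableNetworks derives the bundled Cassandra, Kafka and OpenSearch networks by bumping that octet by one, two and three (keeping the derived octets at or below 254), while CalculateNodeSize resolves each child cluster's node size from models.SolutionSizesMap by cloud provider, application kind and the new solutionSize field. The standalone sketch below is for illustration only and is not part of the patch: the lowercase application keys and the trimmed map are stand-ins for the operator's models constants, and it assumes an AWS_VPC provider with the Production-Small solution size.

// Standalone illustration (not part of the patch): it mirrors the second-octet
// increment from controllers/clusters/helpers.go and the provider/app/solution
// node-size lookup from pkg/models/cadence_apiv2.go. Map keys such as "cassandra"
// are stand-ins for the operator's models constants.
package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

// incrementCIDR bumps the second octet of a CIDR by one, keeping the prefix length,
// the same way the patch derives child-cluster networks from the Cadence network.
func incrementCIDR(cidr string) (string, error) {
	ip, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return "", err
	}

	parts := strings.Split(ip.String(), ".")
	second, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", err
	}
	parts[1] = strconv.Itoa(second + 1)

	prefixLen, _ := ipnet.Mask.Size()
	return fmt.Sprintf("%s/%d", strings.Join(parts, "."), prefixLen), nil
}

// solutionSizes is a trimmed stand-in for models.SolutionSizesMap:
// cloud provider -> application -> solution size -> node size.
var solutionSizes = map[string]map[string]map[string]string{
	"AWS_VPC": {
		"cassandra":  {"Production-Small": "CAS-PRD-r6g.large-800"},
		"kafka":      {"Production-Small": "KFK-PRD-r6g.large-400"},
		"opensearch": {"Production-Small": "SRH-PRD-r6g.large-800"},
	},
}

func main() {
	// 10.251.0.0/16 is the largest second octet the webhook accepts, so the three
	// derived child networks land on 10.252, 10.253 and 10.254.
	network := "10.251.0.0/16"

	for _, app := range []string{"cassandra", "kafka", "opensearch"} {
		next, err := incrementCIDR(network)
		if err != nil {
			fmt.Println("cannot derive network:", err)
			return
		}
		network = next

		nodeSize := solutionSizes["AWS_VPC"][app]["Production-Small"]
		fmt.Printf("%-10s network=%s nodeSize=%s\n", app, network, nodeSize)
	}
	// Expected output:
	// cassandra  network=10.252.0.0/16 nodeSize=CAS-PRD-r6g.large-800
	// kafka      network=10.253.0.0/16 nodeSize=KFK-PRD-r6g.large-400
	// opensearch network=10.254.0.0/16 nodeSize=SRH-PRD-r6g.large-800
}

One design observation grounded in the patch itself: CalculateAvailableNetworks returns five CIDRs (the Cadence network plus four increments), while reconcileNetwork consumes only the first three increments for Cassandra, Kafka and OpenSearch, so the last derived CIDR is currently unused.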