From 418ff3bfccd3432c533ff1c2bee01903fbfd6cfc Mon Sep 17 00:00:00 2001 From: Bohdan Siryk Date: Mon, 26 Feb 2024 15:44:18 +0200 Subject: [PATCH] postgres cr codebase was refactored --- .secrets.baseline | 12 +- apis/clusters/v1beta1/postgresql_types.go | 301 ++++++++++-------- apis/clusters/v1beta1/postgresql_webhook.go | 27 +- apis/clusters/v1beta1/structs.go | 10 - .../clusters/v1beta1/zz_generated.deepcopy.go | 58 +++- .../clusters.instaclustr.com_postgresqls.yaml | 153 ++++++--- .../clusters/datatest/postgresql_v1beta1.yaml | 2 +- controllers/clusters/postgresql_controller.go | 286 +++++++---------- pkg/instaclustr/client.go | 10 +- pkg/instaclustr/interfaces.go | 2 +- pkg/instaclustr/mock/client.go | 2 +- pkg/models/postgresql_apiv2.go | 31 +- 12 files changed, 495 insertions(+), 399 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 0559fadd6..8cfeef9ca 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -319,21 +319,21 @@ "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 354 + "line_number": 370 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a3d7d4a96d18c8fc5a1cf9c9c01c45b4690b4008", "is_verified": false, - "line_number": 360 + "line_number": 376 }, { "type": "Secret Keyword", "filename": "apis/clusters/v1beta1/postgresql_types.go", "hashed_secret": "a57ce131bd944bdf8ba2f2f93e179dc416ed0315", "is_verified": false, - "line_number": 480 + "line_number": 425 } ], "apis/clusters/v1beta1/redis_types.go": [ @@ -574,7 +574,7 @@ "filename": "controllers/clusters/postgresql_controller.go", "hashed_secret": "5ffe533b830f08a0326348a9160afafc8ada44db", "is_verified": false, - "line_number": 1272 + "line_number": 1221 } ], "controllers/clusters/zookeeper_controller_test.go": [ @@ -739,7 +739,7 @@ "filename": "pkg/instaclustr/client.go", "hashed_secret": 
"5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 2072 + "line_number": 2078 } ], "pkg/instaclustr/mock/client.go": [ @@ -1146,5 +1146,5 @@ } ] }, - "generated_at": "2024-02-26T10:23:28Z" + "generated_at": "2024-02-26T13:44:13Z" } diff --git a/apis/clusters/v1beta1/postgresql_types.go b/apis/clusters/v1beta1/postgresql_types.go index 6d67a628a..62c34ec8c 100644 --- a/apis/clusters/v1beta1/postgresql_types.go +++ b/apis/clusters/v1beta1/postgresql_types.go @@ -18,7 +18,6 @@ package v1beta1 import ( "context" - "encoding/json" "fmt" "strconv" @@ -31,15 +30,22 @@ import ( clusterresourcesv1beta1 "github.com/instaclustr/operator/apis/clusterresources/v1beta1" "github.com/instaclustr/operator/pkg/models" + "github.com/instaclustr/operator/pkg/utils/slices" ) type PgDataCentre struct { - DataCentre `json:",inline"` + GenericDataCentreSpec `json:",inline"` // PostgreSQL options - ClientEncryption bool `json:"clientEncryption"` + ClientEncryption bool `json:"clientEncryption"` + NodeSize string `json:"nodeSize"` + NumberOfNodes int `json:"numberOfNodes"` + + //+kubebuilder:Validation:MaxItems:=1 InterDataCentreReplication []*InterDataCentreReplication `json:"interDataCentreReplication,omitempty"` + //+kubebuilder:Validation:MaxItems:=1 IntraDataCentreReplication []*IntraDataCentreReplication `json:"intraDataCentreReplication"` - PGBouncer []*PgBouncer `json:"pgBouncer,omitempty"` + //+kubebuilder:Validation:MaxItems:=1 + PGBouncer []*PgBouncer `json:"pgBouncer,omitempty"` } type PgBouncer struct { @@ -70,26 +76,37 @@ type PgRestoreFrom struct { } // PgSpec defines the desired state of PostgreSQL -// +kubebuilder:validation:XValidation:rule="has(self.extensions) == has(oldSelf.extensions)",message="extensions cannot be changed after it is set" type PgSpec struct { - PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty"` - Cluster `json:",inline"` - DataCentres []*PgDataCentre `json:"dataCentres,omitempty"` - ClusterConfigurations 
map[string]string `json:"clusterConfigurations,omitempty"` + GenericClusterSpec `json:",inline"` + SynchronousModeStrict bool `json:"synchronousModeStrict,omitempty"` - UserRefs []*Reference `json:"userRefs,omitempty" dcomparisonSkip:"true"` + ClusterConfigurations map[string]string `json:"clusterConfigurations,omitempty"` + PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty" dcomparisonSkip:"true"` + //+kubebuilder:Validation:MinItems:=1 + //+kubebuilder:Validation:MaxItems=2 + DataCentres []*PgDataCentre `json:"dataCentres,omitempty"` + UserRefs []*Reference `json:"userRefs,omitempty" dcomparisonSkip:"true"` //+kubebuilder:validate:MaxItems:=1 ResizeSettings []*ResizeSettings `json:"resizeSettings,omitempty" dcomparisonSkip:"true"` - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="extensions cannot be changed after it is set" - Extensions PgExtensions `json:"extensions,omitempty"` + Extensions PgExtensions `json:"extensions,omitempty"` } // PgStatus defines the observed state of PostgreSQL type PgStatus struct { - ClusterStatus `json:",inline"` + GenericStatus `json:",inline"` + + DataCentres []*PgDataCentreStatus `json:"dataCentres"` + DefaultUserSecretRef *Reference `json:"userRefs,omitempty"` } +type PgDataCentreStatus struct { + GenericDataCentreStatus `json:",inline"` + + NumberOfNodes int `json:"numberOfNodes"` + Nodes []*Node `json:"nodes"` +} + //+kubebuilder:object:root=true //+kubebuilder:subresource:status //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" @@ -196,14 +213,10 @@ func (pgs *PgSpec) HasRestore() bool { func (pgs *PgSpec) ToInstAPI() *models.PGCluster { return &models.PGCluster{ - Name: pgs.Name, + GenericClusterFields: pgs.GenericClusterSpec.ToInstAPI(), PostgreSQLVersion: pgs.Version, DataCentres: pgs.DCsToInstAPI(), SynchronousModeStrict: pgs.SynchronousModeStrict, - Description: pgs.Description, - PrivateNetworkCluster: pgs.PrivateNetworkCluster, - SLATier: 
pgs.SLATier, - TwoFactorDelete: pgs.TwoFactorDeletesToInstAPI(), Extensions: pgs.Extensions.ToInstAPI(), } } @@ -224,11 +237,13 @@ func (pgs *PgSpec) ToClusterUpdate() *models.PGClusterUpdate { func (pdc *PgDataCentre) ToInstAPI() *models.PGDataCentre { return &models.PGDataCentre{ - DataCentre: pdc.DataCentre.ToInstAPI(), - PGBouncer: pdc.PGBouncerToInstAPI(), + GenericDataCentreFields: pdc.GenericDataCentreSpec.ToInstAPI(), ClientToClusterEncryption: pdc.ClientEncryption, + PGBouncer: pdc.PGBouncerToInstAPI(), InterDataCentreReplication: pdc.InterDCReplicationToInstAPI(), IntraDataCentreReplication: pdc.IntraDCReplicationToInstAPI(), + NodeSize: pdc.NodeSize, + NumberOfNodes: pdc.NumberOfNodes, } } @@ -268,28 +283,29 @@ func (c *PostgreSQL) IsSpecEqual(spec PgSpec) bool { } func (pgs *PgSpec) IsEqual(iPG PgSpec) bool { - return pgs.Cluster.IsEqual(iPG.Cluster) && + return pgs.GenericClusterSpec.Equals(&iPG.GenericClusterSpec) && pgs.SynchronousModeStrict == iPG.SynchronousModeStrict && - pgs.AreDCsEqual(iPG.DataCentres) + pgs.DCsEqual(iPG.DataCentres) && + slices.EqualsUnordered(pgs.Extensions, iPG.Extensions) } -func (pgs *PgSpec) AreDCsEqual(iDCs []*PgDataCentre) bool { - if len(pgs.DataCentres) != len(iDCs) { +func (pgs *PgSpec) DCsEqual(instaModels []*PgDataCentre) bool { + if len(pgs.DataCentres) != len(instaModels) { return false } - for i, iDC := range iDCs { - dc := pgs.DataCentres[i] + m := map[string]*PgDataCentre{} + for _, dc := range pgs.DataCentres { + m[dc.Name] = dc + } - if iDC.Name != dc.Name { - continue + for _, iDC := range instaModels { + dc, ok := m[iDC.Name] + if !ok { + return false } - if !dc.IsEqual(iDC.DataCentre) || - dc.ClientEncryption != iDC.ClientEncryption || - !dc.AreInterDCReplicationsEqual(iDC.InterDataCentreReplication) || - !dc.AreIntraDCReplicationsEqual(iDC.IntraDataCentreReplication) || - !dc.ArePGBouncersEqual(iDC.PGBouncer) { + if !dc.Equals(iDC) { return false } } @@ -362,112 +378,41 @@ func (pg *PostgreSQL) 
NewUserSecret(defaultUserPassword string) *k8scorev1.Secre } } -func (pg *PostgreSQL) FromInstAPI(iData []byte) (*PostgreSQL, error) { - iPg := &models.PGCluster{} - err := json.Unmarshal(iData, iPg) - if err != nil { - return nil, err - } - - return &PostgreSQL{ - TypeMeta: pg.TypeMeta, - ObjectMeta: pg.ObjectMeta, - Spec: pg.Spec.FromInstAPI(iPg), - Status: pg.Status.FromInstAPI(iPg), - }, nil +func (pg *PostgreSQL) FromInstAPI(instaModel *models.PGCluster) { + pg.Spec.FromInstAPI(instaModel) + pg.Status.FromInstAPI(instaModel) } -func (pg *PostgreSQL) DefaultPasswordFromInstAPI(iData []byte) (string, error) { - type defaultPasswordResponse struct { - DefaultUserPassword string `json:"defaultUserPassword,omitempty"` - } +func (pgs *PgSpec) FromInstAPI(instaModel *models.PGCluster) { + pgs.GenericClusterSpec.FromInstAPI(&instaModel.GenericClusterFields, instaModel.PostgreSQLVersion) + pgs.SynchronousModeStrict = instaModel.SynchronousModeStrict - dpr := &defaultPasswordResponse{} - err := json.Unmarshal(iData, dpr) - if err != nil { - return "", err - } - - return dpr.DefaultUserPassword, nil + pgs.DCsFromInstAPI(instaModel.DataCentres) } -func (pgs *PgSpec) FromInstAPI(iPg *models.PGCluster) PgSpec { - return PgSpec{ - Cluster: Cluster{ - Name: iPg.Name, - Version: iPg.PostgreSQLVersion, - PCICompliance: iPg.PCIComplianceMode, - PrivateNetworkCluster: iPg.PrivateNetworkCluster, - Description: iPg.Description, - SLATier: iPg.SLATier, - TwoFactorDelete: pgs.Cluster.TwoFactorDeleteFromInstAPI(iPg.TwoFactorDelete), - }, - DataCentres: pgs.DCsFromInstAPI(iPg.DataCentres), - SynchronousModeStrict: iPg.SynchronousModeStrict, +func (pgs *PgSpec) DCsFromInstAPI(instaModels []*models.PGDataCentre) { + dcs := make([]*PgDataCentre, len(instaModels)) + for i, instaModel := range instaModels { + dc := &PgDataCentre{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } + pgs.DataCentres = dcs } -func (pgs *PgSpec) DCsFromInstAPI(iDCs []*models.PGDataCentre) (dcs []*PgDataCentre) { - 
for _, iDC := range iDCs { - dcs = append(dcs, &PgDataCentre{ - DataCentre: pgs.Cluster.DCFromInstAPI(iDC.DataCentre), - ClientEncryption: iDC.ClientToClusterEncryption, - InterDataCentreReplication: pgs.InterDCReplicationsFromInstAPI(iDC.InterDataCentreReplication), - IntraDataCentreReplication: pgs.IntraDCReplicationsFromInstAPI(iDC.IntraDataCentreReplication), - PGBouncer: pgs.PGBouncerFromInstAPI(iDC.PGBouncer), - }) - } - return -} - -func (pgs *PgSpec) InterDCReplicationsFromInstAPI(iIRs []*models.PGInterDCReplication) (irs []*InterDataCentreReplication) { - for _, iIR := range iIRs { - ir := &InterDataCentreReplication{ - IsPrimaryDataCentre: iIR.IsPrimaryDataCentre, - } - irs = append(irs, ir) - } - return -} - -func (pgs *PgSpec) IntraDCReplicationsFromInstAPI(iIRs []*models.PGIntraDCReplication) (irs []*IntraDataCentreReplication) { - for _, iIR := range iIRs { - ir := &IntraDataCentreReplication{ - ReplicationMode: iIR.ReplicationMode, - } - irs = append(irs, ir) - } - return +func (pgs *PgStatus) FromInstAPI(instaModel *models.PGCluster) { + pgs.GenericStatus.FromInstAPI(&instaModel.GenericClusterFields) + pgs.DCsFromInstAPI(instaModel.DataCentres) } -func (pgs *PgSpec) PGBouncerFromInstAPI(iPgBs []*models.PGBouncer) (pgbs []*PgBouncer) { - for _, iPgB := range iPgBs { - pgb := &PgBouncer{ - PGBouncerVersion: iPgB.PGBouncerVersion, - PoolMode: iPgB.PoolMode, - } - pgbs = append(pgbs, pgb) +func (pgs *PgStatus) DCsFromInstAPI(instaModels []*models.PGDataCentre) { + dcs := make([]*PgDataCentreStatus, len(instaModels)) + for i, instaModel := range instaModels { + dc := &PgDataCentreStatus{} + dc.FromInstAPI(instaModel) + dcs[i] = dc } - return -} - -func (pgs *PgStatus) FromInstAPI(iPg *models.PGCluster) PgStatus { - return PgStatus{ - ClusterStatus: ClusterStatus{ - ID: iPg.ID, - State: iPg.Status, - DataCentres: pgs.DCsFromInstAPI(iPg.DataCentres), - CurrentClusterOperationStatus: iPg.CurrentClusterOperationStatus, - MaintenanceEvents: 
pgs.MaintenanceEvents, - }, - } -} - -func (pgs *PgStatus) DCsFromInstAPI(iDCs []*models.PGDataCentre) (dcs []*DataCentreStatus) { - for _, iDC := range iDCs { - dcs = append(dcs, pgs.ClusterStatus.DCFromInstAPI(iDC.DataCentre)) - } - return + pgs.DataCentres = dcs } func GetDefaultPgUserSecret( @@ -491,7 +436,7 @@ func GetDefaultPgUserSecret( func (pg *PostgreSQL) GetExposePorts() []k8scorev1.ServicePort { var exposePorts []k8scorev1.ServicePort - if !pg.Spec.PrivateNetworkCluster { + if !pg.Spec.PrivateNetwork { exposePorts = []k8scorev1.ServicePort{ { Name: models.PostgreSQLDB, @@ -541,3 +486,101 @@ func (p PgExtensions) ToInstAPI() []*models.PGExtension { return iExtensions } + +func (pdc *PgDataCentre) Equals(o *PgDataCentre) bool { + return pdc.GenericDataCentreSpec.Equals(&o.GenericDataCentreSpec) && + pdc.ClientEncryption == o.ClientEncryption && + pdc.NumberOfNodes == o.NumberOfNodes && + pdc.NodeSize == o.NodeSize && + slices.EqualsPtr(pdc.InterDataCentreReplication, o.InterDataCentreReplication) && + slices.EqualsPtr(pdc.IntraDataCentreReplication, o.IntraDataCentreReplication) && + slices.EqualsPtr(pdc.PGBouncer, o.PGBouncer) +} + +func (pdc *PgDataCentre) FromInstAPI(instaModel *models.PGDataCentre) { + pdc.GenericDataCentreSpec.FromInstAPI(&instaModel.GenericDataCentreFields) + + pdc.ClientEncryption = instaModel.ClientToClusterEncryption + pdc.NodeSize = instaModel.NodeSize + pdc.NumberOfNodes = instaModel.NumberOfNodes + + pdc.InterReplicationsFromInstAPI(instaModel.InterDataCentreReplication) + pdc.IntraReplicationsFromInstAPI(instaModel.IntraDataCentreReplication) + pdc.PGBouncerFromInstAPI(instaModel.PGBouncer) +} + +func (pdc *PgDataCentre) InterReplicationsFromInstAPI(instaModels []*models.PGInterDCReplication) { + pdc.InterDataCentreReplication = make([]*InterDataCentreReplication, len(instaModels)) + for i, instaModel := range instaModels { + pdc.InterDataCentreReplication[i] = &InterDataCentreReplication{ + IsPrimaryDataCentre: 
instaModel.IsPrimaryDataCentre, + } + } +} + +func (pdc *PgDataCentre) IntraReplicationsFromInstAPI(instaModels []*models.PGIntraDCReplication) { + pdc.IntraDataCentreReplication = make([]*IntraDataCentreReplication, len(instaModels)) + for i, instaModel := range instaModels { + pdc.IntraDataCentreReplication[i] = &IntraDataCentreReplication{ + ReplicationMode: instaModel.ReplicationMode, + } + } +} + +func (pdc *PgDataCentre) PGBouncerFromInstAPI(instaModels []*models.PGBouncer) { + pdc.PGBouncer = make([]*PgBouncer, len(instaModels)) + for i, instaModel := range instaModels { + pdc.PGBouncer[i] = &PgBouncer{ + PGBouncerVersion: instaModel.PGBouncerVersion, + PoolMode: instaModel.PoolMode, + } + } +} + +func (pgs *PgSpec) ClusterConfigurationsFromInstAPI(instaModels []*models.ClusterConfigurations) { + pgs.ClusterConfigurations = make(map[string]string, len(instaModels)) + for _, instaModel := range instaModels { + pgs.ClusterConfigurations[instaModel.ParameterName] = instaModel.ParameterValue + } +} + +func (s *PgDataCentreStatus) FromInstAPI(instaModel *models.PGDataCentre) { + s.GenericDataCentreStatus.FromInstAPI(&instaModel.GenericDataCentreFields) + s.Nodes = nodesFromInstAPI(instaModel.Nodes) + s.NumberOfNodes = instaModel.NumberOfNodes +} + +func (s *PgDataCentreStatus) Equals(o *PgDataCentreStatus) bool { + return s.GenericDataCentreStatus.Equals(&o.GenericDataCentreStatus) && + s.NumberOfNodes == o.NumberOfNodes && + nodesEqual(s.Nodes, o.Nodes) +} + +func (pgs *PgStatus) Equals(o *PgStatus) bool { + return pgs.GenericStatus.Equals(&o.GenericStatus) && + pgs.DCsEqual(o.DataCentres) +} + +func (pgs *PgStatus) DCsEqual(o []*PgDataCentreStatus) bool { + if len(pgs.DataCentres) != len(o) { + return false + } + + m := map[string]*PgDataCentreStatus{} + for _, dc := range pgs.DataCentres { + m[dc.ID] = dc + } + + for _, iDC := range o { + dc, ok := m[iDC.ID] + if !ok { + return false + } + + if !dc.Equals(iDC) { + return false + } + } + + return true +} diff 
--git a/apis/clusters/v1beta1/postgresql_webhook.go b/apis/clusters/v1beta1/postgresql_webhook.go index e189c00ae..7c3d7fed2 100644 --- a/apis/clusters/v1beta1/postgresql_webhook.go +++ b/apis/clusters/v1beta1/postgresql_webhook.go @@ -30,6 +30,7 @@ import ( "github.com/instaclustr/operator/pkg/models" "github.com/instaclustr/operator/pkg/utils/requiredfieldsvalidator" + "github.com/instaclustr/operator/pkg/utils/slices" "github.com/instaclustr/operator/pkg/validation" ) @@ -69,10 +70,6 @@ func (pg *PostgreSQL) Default() { models.ResourceStateAnnotation: "", }) } - - for _, dataCentre := range pg.Spec.DataCentres { - dataCentre.SetDefaultValues() - } } // ValidateCreate implements webhook.Validator so a webhook will be registered for the type @@ -97,7 +94,7 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) } } - err = pg.Spec.Cluster.ValidateCreation() + err = pg.Spec.GenericClusterSpec.ValidateCreation() if err != nil { return err } @@ -130,7 +127,7 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) return models.ErrOnPremisesWithMultiDC } - err = dc.DataCentre.ValidateCreation() + err = dc.GenericDataCentreSpec.validateCreation() if err != nil { return err } @@ -174,6 +171,10 @@ func (pgv *pgValidator) ValidateUpdate(ctx context.Context, old runtime.Object, postgresqllog.Info("validate update", "name", pg.Name) + if pg.Annotations[models.ResourceStateAnnotation] == models.SyncingEvent { + return nil + } + // skip validation when we receive cluster specification update from the Instaclustr Console. 
if pg.Annotations[models.ExternalChangesAnnotation] == models.True { return nil @@ -237,7 +238,7 @@ func (pgv *pgValidator) validatePostgreSQLUsers(ctx context.Context, pg *Postgre } } - if !externalIPExists || pg.Spec.PrivateNetworkCluster { + if !externalIPExists || pg.Spec.PrivateNetwork { return fmt.Errorf("cannot create PostgreSQL user, if your cluster is private or has no external ips " + "you need to configure peering and remove user references from cluster specification") } @@ -273,6 +274,7 @@ type immutablePostgreSQLDCFields struct { type specificPostgreSQLDC struct { ClientEncryption bool + NumberOfNodes int } func (pg *PostgreSQL) ValidateDefaultUserPassword(password string) bool { @@ -329,6 +331,10 @@ func (pgs *PgSpec) ValidateImmutableFieldsUpdate(oldSpec PgSpec) error { return err } + if !slices.Equals(pgs.Extensions, oldSpec.Extensions) { + return fmt.Errorf("spec.extensions are immutable") + } + return nil } @@ -351,7 +357,7 @@ func (pgs *PgSpec) validateImmutableDCsFieldsUpdate(oldSpec PgSpec) error { return err } - err = newDC.validateImmutableCloudProviderSettingsUpdate(oldDC.CloudProviderSettings) + err = newDC.validateImmutableCloudProviderSettingsUpdate(&oldDC.GenericDataCentreSpec) if err != nil { return err } @@ -366,7 +372,7 @@ func (pgs *PgSpec) validateImmutableDCsFieldsUpdate(oldSpec PgSpec) error { return err } - if newDC.NodesNumber != oldDC.NodesNumber { + if newDC.NumberOfNodes != oldDC.NumberOfNodes { return models.ErrImmutableNodesNumber } @@ -408,7 +414,7 @@ func (pgs *PgSpec) newImmutableFields() *immutablePostgreSQLFields { specificPostgreSQLFields: specificPostgreSQLFields{ SynchronousModeStrict: pgs.SynchronousModeStrict, }, - immutableCluster: pgs.Cluster.newImmutableFields(), + immutableCluster: pgs.GenericClusterSpec.immutableFields(), } } @@ -423,6 +429,7 @@ func (pdc *PgDataCentre) newImmutableFields() *immutablePostgreSQLDCFields { }, specificPostgreSQLDC: specificPostgreSQLDC{ ClientEncryption: pdc.ClientEncryption, + 
NumberOfNodes: pdc.NumberOfNodes, }, } } diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go index c7a3f55d0..1ee67ea71 100644 --- a/apis/clusters/v1beta1/structs.go +++ b/apis/clusters/v1beta1/structs.go @@ -477,16 +477,6 @@ func (dc *DataCentre) SetDefaultValues() { } } -func (c *Cluster) newImmutableFields() immutableCluster { - return immutableCluster{ - Name: c.Name, - Version: c.Version, - PCICompliance: c.PCICompliance, - PrivateNetworkCluster: c.PrivateNetworkCluster, - SLATier: c.SLATier, - } -} - func (cs *ClusterStatus) AreMaintenanceEventStatusesEqual( iEventStatuses []*clusterresource.ClusteredMaintenanceEventStatus, ) bool { diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index d46e85410..bd948d15a 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -2198,7 +2198,7 @@ func (in *PgBouncer) DeepCopy() *PgBouncer { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PgDataCentre) DeepCopyInto(out *PgDataCentre) { *out = *in - in.DataCentre.DeepCopyInto(&out.DataCentre) + in.GenericDataCentreSpec.DeepCopyInto(&out.GenericDataCentreSpec) if in.InterDataCentreReplication != nil { in, out := &in.InterDataCentreReplication, &out.InterDataCentreReplication *out = make([]*InterDataCentreReplication, len(*in)) @@ -2244,6 +2244,33 @@ func (in *PgDataCentre) DeepCopy() *PgDataCentre { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PgDataCentreStatus) DeepCopyInto(out *PgDataCentreStatus) { + *out = *in + in.GenericDataCentreStatus.DeepCopyInto(&out.GenericDataCentreStatus) + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]*Node, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Node) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PgDataCentreStatus. +func (in *PgDataCentreStatus) DeepCopy() *PgDataCentreStatus { + if in == nil { + return nil + } + out := new(PgDataCentreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PgExtension) DeepCopyInto(out *PgExtension) { *out = *in @@ -2307,12 +2334,19 @@ func (in *PgRestoreFrom) DeepCopy() *PgRestoreFrom { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PgSpec) DeepCopyInto(out *PgSpec) { *out = *in + in.GenericClusterSpec.DeepCopyInto(&out.GenericClusterSpec) + if in.ClusterConfigurations != nil { + in, out := &in.ClusterConfigurations, &out.ClusterConfigurations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.PgRestoreFrom != nil { in, out := &in.PgRestoreFrom, &out.PgRestoreFrom *out = new(PgRestoreFrom) (*in).DeepCopyInto(*out) } - in.Cluster.DeepCopyInto(&out.Cluster) if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*PgDataCentre, len(*in)) @@ -2324,13 +2358,6 @@ func (in *PgSpec) DeepCopyInto(out *PgSpec) { } } } - if in.ClusterConfigurations != nil { - in, out := &in.ClusterConfigurations, &out.ClusterConfigurations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } if in.UserRefs != nil { in, out := &in.UserRefs, &out.UserRefs *out = make([]*apiextensions.ObjectReference, len(*in)) @@ -2373,7 +2400,18 @@ func (in *PgSpec) DeepCopy() *PgSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PgStatus) DeepCopyInto(out *PgStatus) { *out = *in - in.ClusterStatus.DeepCopyInto(&out.ClusterStatus) + in.GenericStatus.DeepCopyInto(&out.GenericStatus) + if in.DataCentres != nil { + in, out := &in.DataCentres, &out.DataCentres + *out = make([]*PgDataCentreStatus, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PgDataCentreStatus) + (*in).DeepCopyInto(*out) + } + } + } if in.DefaultUserSecretRef != nil { in, out := &in.DefaultUserSecretRef, &out.DefaultUserSecretRef *out = new(apiextensions.ObjectReference) diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index 1fcf88cea..0974b2c51 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -56,26 +56,99 @@ spec: items: properties: accountName: + default: INSTACLUSTR + description: For customers running in their own account. Your + provider account can be found on the Create Cluster page on + the Instaclustr Console, or the "Provider Account" property + on any existing cluster. For customers provisioning on Instaclustr's + cloud provider accounts, this property may be omitted. type: string + awsSettings: + description: AWS specific settings for the Data Centre. Cannot + be provided with GCP or Azure settings. + items: + properties: + backupBucket: + description: Specify the S3 bucket to use for storing + backup data for the cluster data centre. Only available + for customers running in their own cloud provider accounts. + Currently supported for OpenSearch clusters only. + type: string + customVirtualNetworkId: + description: VPC ID into which the Data Centre will be + provisioned. The Data Centre's network allocation must + match the IPv4 CIDR block of the specified VPC. + type: string + encryptionKey: + description: ID of a KMS encryption key to encrypt data + on nodes. 
KMS encryption key must be set in Cluster + Resources through the Instaclustr Console before provisioning + an encrypted Data Centre. + type: string + type: object + maxItems: 1 + type: array + azureSettings: + description: Azure specific settings for the Data Centre. Cannot + be provided with AWS or GCP settings. + items: + properties: + customVirtualNetworkId: + description: VNet ID into which the Data Centre will be + provisioned. The VNet must have an available address + space for the Data Centre's network allocation to be + appended to the VNet. Currently supported for PostgreSQL + clusters only. + type: string + resourceGroup: + description: The name of the Azure Resource Group into + which the Data Centre will be provisioned. + type: string + storageNetwork: + description: 'The private network address block to be + used for the storage network. This is only used for + certain node sizes, currently limited to those which + use Azure NetApp Files: for all other node sizes, this + field should not be provided. The network must have + a prefix length between /16 and /28, and must be part + of a private address range.' + type: string + type: object + maxItems: 1 + type: array clientEncryption: description: PostgreSQL options type: boolean cloudProvider: + description: Name of a cloud provider service. type: string - cloudProviderSettings: + gcpSettings: + description: GCP specific settings for the Data Centre. Cannot + be provided with AWS or Azure settings. items: properties: - backupBucket: - type: string customVirtualNetworkId: + description: "Network name or a relative Network or Subnetwork + URI. The Data Centre's network allocation must match + the IPv4 CIDR block of the specified subnet. \n Examples: + Network URI: projects/{riyoa-gcp-project-name}/global/networks/{network-name}. + Network name: {network-name}, equivalent to projects/{riyoa-gcp-project-name}/global/networks/{network-name}. 
+ Same-project subnetwork URI: projects/{riyoa-gcp-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}. + Shared VPC subnetwork URI: projects/{riyoa-gcp-host-project-name}/regions/{region-id}/subnetworks/{subnetwork-name}." type: string disableSnapshotAutoExpiry: + description: Specify whether the GCS backup bucket should + automatically expire data after 7 days or not. Setting + this to true will disable automatic expiry and will + allow for creation of custom snapshot repositories with + customisable retention using the Index Management Plugin. + The storage will have to be manually cleared after the + cluster is deleted. Only available for customers running + in their own cloud provider accounts. Currently supported + for OpenSearch clusters only. type: boolean - diskEncryptionKey: - type: string - resourceGroup: - type: string type: object + maxItems: 1 type: array interDataCentreReplication: items: @@ -96,12 +169,18 @@ spec: type: object type: array name: + description: A logical name for the data centre within a cluster. + These names must be unique in the cluster. type: string network: + description: The private network address block for the Data + Centre specified using CIDR address notation. The network + must have a prefix length between /12 and /22 and must be + part of a private address space. type: string nodeSize: type: string - nodesNumber: + numberOfNodes: type: integer pgBouncer: items: @@ -116,18 +195,25 @@ spec: type: object type: array region: + description: Region of the Data Centre. type: string tags: additionalProperties: type: string + description: List of tags to apply to the Data Centre. Tags + are metadata labels which allow you to identify, categorize + and filter clusters. This can be useful for grouping together + clusters into applications, environments, or any category + that you require. 
type: object required: - clientEncryption - cloudProvider - intraDataCentreReplication + - name - network - nodeSize - - nodesNumber + - numberOfNodes - region type: object type: array @@ -147,18 +233,9 @@ spec: - name type: object type: array - x-kubernetes-validations: - - message: extensions cannot be changed after it is set - rule: self == oldSelf name: description: Name [ 3 .. 32 ] characters. type: string - pciCompliance: - description: The PCI compliance standards relate to the security of - user data and transactional information. Can only be applied clusters - provisioned on AWS_VPC, running Cassandra, Kafka, Elasticsearch - and Redis. - type: boolean pgRestoreFrom: properties: cdsConfigs: @@ -201,7 +278,7 @@ spec: required: - clusterId type: object - privateNetworkCluster: + privateNetwork: type: boolean resizeSettings: items: @@ -249,21 +326,14 @@ spec: version: type: string type: object - x-kubernetes-validations: - - message: extensions cannot be changed after it is set - rule: has(self.extensions) == has(oldSelf.extensions) status: description: PgStatus defines the observed state of PostgreSQL properties: - cdcid: - type: string currentClusterOperationStatus: type: string dataCentres: items: properties: - encryptionKeyId: - type: string id: type: string name: @@ -289,21 +359,8 @@ spec: type: string type: object type: array - nodesNumber: + numberOfNodes: type: integer - privateLink: - items: - properties: - advertisedHostname: - type: string - endPointServiceId: - type: string - endPointServiceName: - type: string - required: - - advertisedHostname - type: object - type: array resizeOperations: items: properties: @@ -360,6 +417,9 @@ spec: type: array status: type: string + required: + - nodes + - numberOfNodes type: object type: array id: @@ -450,19 +510,8 @@ spec: type: array type: object type: array - options: - properties: - dataNodeSize: - type: string - masterNodeSize: - type: string - openSearchDashboardsNodeSize: - type: string - type: object 
state: type: string - twoFactorDeleteEnabled: - type: boolean userRefs: description: ObjectReference is namespaced reference to an object properties: @@ -474,6 +523,8 @@ spec: - name - namespace type: object + required: + - dataCentres type: object type: object served: true diff --git a/controllers/clusters/datatest/postgresql_v1beta1.yaml b/controllers/clusters/datatest/postgresql_v1beta1.yaml index b96b86dd2..56a5a536a 100644 --- a/controllers/clusters/datatest/postgresql_v1beta1.yaml +++ b/controllers/clusters/datatest/postgresql_v1beta1.yaml @@ -13,7 +13,7 @@ spec: network: "10.1.0.0/16" cloudProvider: "AWS_VPC" nodeSize: "PGS-DEV-t4g.small-5" - nodesNumber: 2 + numberOfNodes: 2 accountName: "accountNameTEST" # cloudProviderSettings: # - customVirtualNetworkId: "vpc-12345678" diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index 6d2c50b73..c972ba737 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "strconv" "github.com/go-logr/logr" @@ -126,119 +127,129 @@ func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request) } } -func (r *PostgreSQLReconciler) handleCreateCluster( - ctx context.Context, - pg *v1beta1.PostgreSQL, - l logr.Logger, -) (reconcile.Result, error) { - l = l.WithName("PostgreSQL creation event") +func (r *PostgreSQLReconciler) createFromRestore(pg *v1beta1.PostgreSQL, l logr.Logger) (*models.PGCluster, error) { + l.Info( + "Creating PostgreSQL cluster from backup", + "original cluster ID", pg.Spec.PgRestoreFrom.ClusterID, + ) - var err error + id, err := r.API.RestoreCluster(pg.RestoreInfoToInstAPI(pg.Spec.PgRestoreFrom), models.PgRestoreValue) + if err != nil { + return nil, fmt.Errorf("failed to restore cluster, err: %v", err) + } - patch := pg.NewPatch() - if pg.Status.ID == "" { - if pg.Spec.HasRestore() { - l.Info( - "Creating 
PostgreSQL cluster from backup",
-				"original cluster ID", pg.Spec.PgRestoreFrom.ClusterID,
-			)
+	r.EventRecorder.Eventf(
+		pg, models.Normal, models.Created,
+		"Cluster restore request is sent. Original cluster ID: %s, new cluster ID: %s",
+		pg.Spec.PgRestoreFrom.ClusterID,
+		id,
+	)
-			pg.Status.ID, err = r.API.RestoreCluster(pg.RestoreInfoToInstAPI(pg.Spec.PgRestoreFrom), models.PgRestoreValue)
-			if err != nil {
-				l.Error(err, "Cannot restore PostgreSQL cluster from backup",
-					"original cluster ID", pg.Spec.PgRestoreFrom.ClusterID,
-				)
+	instaModel, err := r.API.GetPostgreSQL(id)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get cluster details, err: %v", err)
+	}
-				r.EventRecorder.Eventf(
-					pg, models.Warning, models.CreationFailed,
-					"Cluster restoration from backup on Instaclustr cloud is failed. Reason: %v",
-					err,
-				)
+	return instaModel, nil
+}
-				return reconcile.Result{}, err
-			}
+func (r *PostgreSQLReconciler) createPostgreSQL(pg *v1beta1.PostgreSQL, l logr.Logger) (*models.PGCluster, error) {
+	l.Info(
+		"Creating PostgreSQL cluster",
+		"cluster name", pg.Spec.Name,
+		"data centres", pg.Spec.DataCentres,
+	)
-			r.EventRecorder.Eventf(
-				pg, models.Normal, models.Created,
-				"Cluster restore request is sent. Original cluster ID: %s, new cluster ID: %s",
-				pg.Spec.PgRestoreFrom.ClusterID,
-				pg.Status.ID,
-			)
-		} else {
-			l.Info(
-				"Creating PostgreSQL cluster",
-				"cluster name", pg.Spec.Name,
-				"data centres", pg.Spec.DataCentres,
-			)
+	b, err := r.API.CreateClusterRaw(instaclustr.PGSQLEndpoint, pg.Spec.ToInstAPI())
+	if err != nil {
+		return nil, fmt.Errorf("failed to create cluster, err: %v", err)
+	}
-			pgSpec := pg.Spec.ToInstAPI()
+	r.EventRecorder.Eventf(
+		pg, models.Normal, models.Created,
+		"Cluster creation request is sent. 
Cluster ID: %s", + pg.Status.ID, + ) - pg.Status.ID, err = r.API.CreateCluster(instaclustr.PGSQLEndpoint, pgSpec) - if err != nil { - l.Error( - err, "Cannot create PostgreSQL cluster", - "spec", pg.Spec, - ) + var instaModel models.PGCluster + err = json.Unmarshal(b, &instaModel) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal body to models.PGCluster, err: %v", err) + } - r.EventRecorder.Eventf( - pg, models.Warning, models.CreationFailed, - "Cluster creation on the Instaclustr is failed. Reason: %v", - err, - ) + return &instaModel, nil +} - return reconcile.Result{}, err - } +func (r *PostgreSQLReconciler) createCluster(ctx context.Context, pg *v1beta1.PostgreSQL, l logr.Logger) error { + var instaModel *models.PGCluster + var err error - r.EventRecorder.Eventf( - pg, models.Normal, models.Created, - "Cluster creation request is sent. Cluster ID: %s", - pg.Status.ID, + if pg.Spec.HasRestore() { + instaModel, err = r.createFromRestore(pg, l) + } else { + instaModel, err = r.createPostgreSQL(pg, l) + } + if err != nil { + return err + } + + patch := pg.NewPatch() + + pg.Spec.FromInstAPI(instaModel) + pg.Annotations[models.ResourceStateAnnotation] = models.SyncingEvent + err = r.Patch(ctx, pg, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster spec, err: %v", err) + } + + pg.Status.FromInstAPI(instaModel) + err = r.Status().Patch(ctx, pg, patch) + if err != nil { + return fmt.Errorf("failed to patch cluster status, err: %v", err) + } + + err = r.createDefaultPassword(ctx, pg, l) + if err != nil { + return err + } + + return nil +} + +func (r *PostgreSQLReconciler) handleCreateCluster( + ctx context.Context, + pg *v1beta1.PostgreSQL, + l logr.Logger, +) (reconcile.Result, error) { + l = l.WithName("PostgreSQL creation event") + + if pg.Status.ID == "" { + err := r.createCluster(ctx, pg, l) + if err != nil { + r.EventRecorder.Eventf(pg, models.Warning, models.CreationFailed, + "Failed to create PostgreSQL cluster. 
Reason: %v", err,
+			)
+			return reconcile.Result{}, err
+		}
+	}
-	err = r.Status().Patch(ctx, pg, patch)
+	if pg.Status.State != models.DeletedStatus {
+		patch := pg.NewPatch()
+		controllerutil.AddFinalizer(pg, models.DeletionFinalizer)
+		pg.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
+		err := r.Patch(ctx, pg, patch)
 		if err != nil {
-			l.Error(err, "Cannot patch PostgreSQL resource status",
+			l.Error(err, "Cannot patch PostgreSQL resource",
 				"cluster name", pg.Spec.Name,
-				"status", pg.Status,
-			)
+				"status", pg.Status)
 			r.EventRecorder.Eventf(
 				pg, models.Warning, models.PatchFailed,
-				"Cluster resource status patch is failed. Reason: %v",
-				err,
-			)
+				"Cluster resource patch is failed. Reason: %v", err)
 			return reconcile.Result{}, err
 		}
-		l.Info(
-			"PostgreSQL resource has been created",
-			"cluster name", pg.Name,
-			"cluster ID", pg.Status.ID,
-			"kind", pg.Kind,
-			"api version", pg.APIVersion,
-			"namespace", pg.Namespace,
-		)
-	}
-
-	controllerutil.AddFinalizer(pg, models.DeletionFinalizer)
-
-	pg.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent
-	err = r.Patch(ctx, pg, patch)
-	if err != nil {
-		l.Error(err, "Cannot patch PostgreSQL resource",
-			"cluster name", pg.Spec.Name,
-			"status", pg.Status)
-
-		r.EventRecorder.Eventf(
-			pg, models.Warning, models.PatchFailed,
-			"Cluster resource patch is failed. Reason: %v", err)
-
-		return reconcile.Result{}, err
-	}
-
-	if pg.Status.State != models.DeletedStatus {
 		err = r.startClusterStatusJob(pg)
 		if err != nil {
 			l.Error(err, "Cannot start PostgreSQL cluster status check job",
@@ -282,29 +293,13 @@ func (r *PostgreSQLReconciler) handleCreateCluster(
 			)
 		}
-		err = r.createDefaultPassword(ctx, pg, l)
-		if err != nil {
-			l.Error(err, "Cannot create default password for PostgreSQL",
-				"cluster name", pg.Spec.Name,
-				"clusterID", pg.Status.ID,
-			)
-
-			r.EventRecorder.Eventf(
-				pg, models.Warning, models.CreationFailed,
-				"Default user secret creation on the Instaclustr is failed. 
Reason: %v", - err, - ) - - return reconcile.Result{}, err - } - return models.ExitReconcile, nil } func (r *PostgreSQLReconciler) handleUpdateCluster(ctx context.Context, pg *v1beta1.PostgreSQL, req ctrl.Request, l logr.Logger) (reconcile.Result, error) { l = l.WithName("PostgreSQL update event") - iData, err := r.API.GetPostgreSQL(pg.Status.ID) + instaModel, err := r.API.GetPostgreSQL(pg.Status.ID) if err != nil { l.Error( err, "Cannot get PostgreSQL cluster status from the Instaclustr API", @@ -320,28 +315,15 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(ctx context.Context, pg *v1be return reconcile.Result{}, err } - iPg, err := pg.FromInstAPI(iData) - if err != nil { - l.Error( - err, "Cannot convert PostgreSQL cluster status from the Instaclustr API", - "cluster name", pg.Spec.Name, - "cluster ID", pg.Status.ID, - ) - - r.EventRecorder.Eventf( - pg, models.Warning, models.ConversionFailed, - "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", - err, - ) - return reconcile.Result{}, err - } + iPg := &v1beta1.PostgreSQL{} + iPg.FromInstAPI(instaModel) if pg.Annotations[models.ExternalChangesAnnotation] == models.True || r.RateLimiter.NumRequeues(req) == rlimiter.DefaultMaxTries { return handleExternalChanges[v1beta1.PgSpec](r.EventRecorder, r.Client, pg, iPg, l) } - if pg.Spec.ClusterSettingsNeedUpdate(iPg.Spec.Cluster) { + if pg.Spec.ClusterSettingsNeedUpdate(&iPg.Spec.GenericClusterSpec) { l.Info("Updating cluster settings", "instaclustr description", iPg.Spec.Description, "instaclustr two factor delete", iPg.Spec.TwoFactorDelete) @@ -357,7 +339,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster(ctx context.Context, pg *v1be } } - if !pg.Spec.AreDCsEqual(iPg.Spec.DataCentres) { + if !pg.Spec.DCsEqual(iPg.Spec.DataCentres) { l.Info("Update request to Instaclustr API has been sent", "spec data centres", pg.Spec.DataCentres, "resize settings", pg.Spec.ResizeSettings, @@ -736,7 +718,7 @@ func (r *PostgreSQLReconciler) 
startClusterBackupsJob(pg *v1beta1.PostgreSQL) er } func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) scheduler.Job { - l := log.Log.WithValues("component", "postgreSQLStatusClusterJob") + l := log.Log.WithValues("syncJob", pg.GetJobID(scheduler.StatusChecker), "clusterID", pg.Status.ID) return func() error { namespacedName := client.ObjectKeyFromObject(pg) @@ -756,7 +738,7 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul return err } - instPGData, err := r.API.GetPostgreSQL(pg.Status.ID) + instaModel, err := r.API.GetPostgreSQL(pg.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { if pg.DeletionTimestamp != nil { @@ -775,26 +757,16 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul return err } - iPg, err := pg.FromInstAPI(instPGData) - if err != nil { - l.Error(err, "Cannot convert PostgreSQL cluster status from Instaclustr", - "cluster name", pg.Spec.Name, - "clusterID", pg.Status.ID, - ) + iPg := &v1beta1.PostgreSQL{} + iPg.FromInstAPI(instaModel) - return err - } - - if !areStatusesEqual(&iPg.Status.ClusterStatus, &pg.Status.ClusterStatus) { - l.Info("Updating PostgreSQL cluster status", - "new cluster status", iPg.Status, - "old cluster status", pg.Status, - ) + if !pg.Status.Equals(&iPg.Status) { + l.Info("Updating PostgreSQL cluster status") - areDCsEqual := areDataCentresEqual(iPg.Status.ClusterStatus.DataCentres, pg.Status.ClusterStatus.DataCentres) + areDCsEqual := pg.Status.DCsEqual(iPg.Status.DataCentres) patch := pg.NewPatch() - pg.Status.ClusterStatus = iPg.Status.ClusterStatus + pg.Status.FromInstAPI(instaModel) err = r.Status().Patch(context.Background(), pg, patch) if err != nil { l.Error(err, "Cannot patch PostgreSQL cluster status", @@ -808,14 +780,14 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul if !areDCsEqual { var nodes []*v1beta1.Node - for _, dc := range iPg.Status.ClusterStatus.DataCentres { + 
for _, dc := range iPg.Status.DataCentres { nodes = append(nodes, dc.Nodes...) } err = exposeservice.Create(r.Client, pg.Name, pg.Namespace, - pg.Spec.PrivateNetworkCluster, + pg.Spec.PrivateNetwork, nodes, models.PgConnectionPort) if err != nil { @@ -904,7 +876,7 @@ func (r *PostgreSQLReconciler) newWatchStatusJob(pg *v1beta1.PostgreSQL) schedul func (r *PostgreSQLReconciler) createDefaultPassword(ctx context.Context, pg *v1beta1.PostgreSQL, l logr.Logger) error { patch := pg.NewPatch() - iData, err := r.API.GetPostgreSQL(pg.Status.ID) + instaModel, err := r.API.GetPostgreSQL(pg.Status.ID) if err != nil { l.Error( err, "Cannot get PostgreSQL cluster status from the Instaclustr API", @@ -929,23 +901,7 @@ func (r *PostgreSQLReconciler) createDefaultPassword(ctx context.Context, pg *v1 return nil } - defaultUserPassword, err := pg.DefaultPasswordFromInstAPI(iData) - if err != nil { - l.Error(err, "Cannot get default user creds for PostgreSQL cluster from the Instaclustr API", - "cluster name", pg.Spec.Name, - "clusterID", pg.Status.ID, - ) - - r.EventRecorder.Eventf( - pg, models.Warning, models.FetchFailed, - "Default user password fetch from the Instaclustr API is failed. 
Reason: %v", - err, - ) - - return err - } - - secret := pg.NewUserSecret(defaultUserPassword) + secret := pg.NewUserSecret(instaModel.DefaultUserPassword) err = r.Client.Create(context.TODO(), secret) if err != nil { l.Error(err, "Cannot create PostgreSQL default user secret", @@ -1314,6 +1270,10 @@ func (r *PostgreSQLReconciler) SetupWithManager(mgr ctrl.Manager) error { newObj := event.ObjectNew.(*v1beta1.PostgreSQL) + if newObj.Status.ID == "" && newObj.Annotations[models.ResourceStateAnnotation] == models.SyncingEvent { + return false + } + if newObj.Status.ID == "" { newObj.Annotations[models.ResourceStateAnnotation] = models.CreatingEvent return true @@ -1356,7 +1316,7 @@ func (r *PostgreSQLReconciler) reconcileMaintenanceEvents(ctx context.Context, p return err } - if !pg.Status.AreMaintenanceEventStatusesEqual(iMEStatuses) { + if !pg.Status.MaintenanceEventsEqual(iMEStatuses) { patch := pg.NewPatch() pg.Status.MaintenanceEvents = iMEStatuses err = r.Status().Patch(ctx, pg, patch) diff --git a/pkg/instaclustr/client.go b/pkg/instaclustr/client.go index 34a6896cd..da6611529 100644 --- a/pkg/instaclustr/client.go +++ b/pkg/instaclustr/client.go @@ -1856,7 +1856,7 @@ func (c *Client) RestoreCluster(restoreData any, clusterKind string) (string, er return response.ClusterID, nil } -func (c *Client) GetPostgreSQL(id string) ([]byte, error) { +func (c *Client) GetPostgreSQL(id string) (*models.PGCluster, error) { url := c.serverHostname + PGSQLEndpoint + id resp, err := c.DoRequest(url, http.MethodGet, nil) if err != nil { @@ -1877,7 +1877,13 @@ func (c *Client) GetPostgreSQL(id string) ([]byte, error) { return nil, fmt.Errorf("status code: %d, message: %s", resp.StatusCode, body) } - return body, nil + var pg models.PGCluster + err = json.Unmarshal(body, &pg) + if err != nil { + return nil, err + } + + return &pg, nil } func (c *Client) UpdatePostgreSQL(id string, r *models.PGClusterUpdate) error { diff --git a/pkg/instaclustr/interfaces.go 
b/pkg/instaclustr/interfaces.go index 70f751bab..146462081 100644 --- a/pkg/instaclustr/interfaces.go +++ b/pkg/instaclustr/interfaces.go @@ -86,7 +86,7 @@ type API interface { UpdateKafkaConnect(id string, kc models.KafkaConnectAPIUpdate) error GetZookeeper(id string) ([]byte, error) RestoreCluster(restoreData any, clusterKind string) (string, error) - GetPostgreSQL(id string) ([]byte, error) + GetPostgreSQL(id string) (*models.PGCluster, error) UpdatePostgreSQL(id string, r *models.PGClusterUpdate) error GetPostgreSQLConfigs(id string) ([]*models.PGConfigs, error) CreatePostgreSQLConfiguration(id, name, value string) error diff --git a/pkg/instaclustr/mock/client.go b/pkg/instaclustr/mock/client.go index 3860e9f2b..99fdebea2 100644 --- a/pkg/instaclustr/mock/client.go +++ b/pkg/instaclustr/mock/client.go @@ -318,7 +318,7 @@ func (c *mockClient) GetZookeeper(id string) ([]byte, error) { panic("GetZookeeper: is not implemented") } -func (c *mockClient) GetPostgreSQL(id string) ([]byte, error) { +func (c *mockClient) GetPostgreSQL(id string) (*models.PGCluster, error) { panic("GetPostgreSQL: is not implemented") } diff --git a/pkg/models/postgresql_apiv2.go b/pkg/models/postgresql_apiv2.go index e73dbd1fa..7d48611e5 100644 --- a/pkg/models/postgresql_apiv2.go +++ b/pkg/models/postgresql_apiv2.go @@ -17,19 +17,15 @@ limitations under the License. 
package models type PGCluster struct { - ID string `json:"id,omitempty"` - Name string `json:"name"` - PostgreSQLVersion string `json:"postgresqlVersion"` - DataCentres []*PGDataCentre `json:"dataCentres"` - SynchronousModeStrict bool `json:"synchronousModeStrict"` - PrivateNetworkCluster bool `json:"privateNetworkCluster"` - SLATier string `json:"slaTier"` - TwoFactorDelete []*TwoFactorDelete `json:"twoFactorDelete,omitempty"` - PCIComplianceMode bool `json:"pciComplianceMode,omitempty"` - CurrentClusterOperationStatus string `json:"currentClusterOperationStatus,omitempty"` - Status string `json:"status,omitempty"` - Description string `json:"description,omitempty"` - Extensions []*PGExtension `json:"extensions,omitempty"` + GenericClusterFields `json:",inline"` + + PostgreSQLVersion string `json:"postgresqlVersion"` + DefaultUserPassword string `json:"defaultUserPassword,omitempty"` + PCIComplianceMode bool `json:"pciComplianceMode,omitempty"` + SynchronousModeStrict bool `json:"synchronousModeStrict"` + DataCentres []*PGDataCentre `json:"dataCentres"` + TwoFactorDelete []*TwoFactorDelete `json:"twoFactorDelete,omitempty"` + Extensions []*PGExtension `json:"extensions,omitempty"` } type PGBouncer struct { @@ -38,11 +34,16 @@ type PGBouncer struct { } type PGDataCentre struct { - DataCentre `json:",inline"` - ClientToClusterEncryption bool `json:"clientToClusterEncryption"` + GenericDataCentreFields `json:",inline"` + + NumberOfNodes int `json:"numberOfNodes"` + NodeSize string `json:"nodeSize"` + ClientToClusterEncryption bool `json:"clientToClusterEncryption"` + InterDataCentreReplication []*PGInterDCReplication `json:"interDataCentreReplication,omitempty"` IntraDataCentreReplication []*PGIntraDCReplication `json:"intraDataCentreReplication"` PGBouncer []*PGBouncer `json:"pgBouncer,omitempty"` + Nodes []*Node `json:"nodes,omitempty"` } type PGInterDCReplication struct {