align sync and update logs
FxKu committed Aug 23, 2024
1 parent cc9074c commit 05c922d
Showing 11 changed files with 58 additions and 55 deletions.
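
The changes below follow one pattern: logrus calls whose message contains no format verbs drop the Debugf/Debugln variants in favor of plain Debug, a few messages gain context (role, suffix, applicationId), and messages that record notable actions (rolling updates, secret creation, stream updates) are promoted to Info level. The following is a minimal sketch of that convention, not code from the repository; it assumes the sirupsen/logrus API and uses an illustrative cluster type that only mirrors the operator's logger field, with deletePods loosely modeled on the pkg/cluster/pod.go hunk below.

package main

import (
	"github.com/sirupsen/logrus"
)

// cluster is an illustrative stand-in for the operator's Cluster type;
// like the real one, it logs through a structured logrus entry.
type cluster struct {
	logger *logrus.Entry
}

func (c *cluster) deletePods(names []string) {
	// No format verbs in the message: plain Debug replaces Debugf/Debugln.
	c.logger.Debug("deleting pods")

	for _, name := range names {
		// Format verbs present: the *f variant stays.
		c.logger.Debugf("deleting pod %s", name)
	}

	// Notable state changes are logged at Info level instead of Debug.
	c.logger.Infof("%d pod(s) have been deleted", len(names))
}

func main() {
	log := logrus.New()
	log.SetLevel(logrus.DebugLevel)
	c := &cluster{logger: log.WithField("cluster", "demo")}
	c.deletePods([]string{"demo-0", "demo-1"})
}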
10 changes: 5 additions & 5 deletions pkg/cluster/cluster.go
@@ -1014,7 +1014,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser
if initUsers {
c.logger.Debugf("initialize users")
c.logger.Debug("initialize users")
if err := c.initUsers(); err != nil {
c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)
userInitFailed = true
@@ -1023,7 +1023,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}
if initUsers || annotationsChanged {
c.logger.Debugf("syncing secrets")
c.logger.Debug("syncing secrets")
//TODO: mind the secrets of the deleted/new users
if err := c.syncSecrets(); err != nil {
c.logger.Errorf("could not sync secrets: %v", err)
@@ -1065,7 +1065,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

// create if it did not exist
if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup {
c.logger.Debugf("creating backup cron job")
c.logger.Debug("creating backup cron job")
if err := c.createLogicalBackupJob(); err != nil {
c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err)
updateFailed = true
@@ -1075,7 +1075,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

// delete if no longer needed
if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup {
c.logger.Debugf("deleting backup cron job")
c.logger.Debug("deleting backup cron job")
if err := c.deleteLogicalBackupJob(); err != nil {
c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err)
updateFailed = true
@@ -1095,7 +1095,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {

// Roles and Databases
if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
c.logger.Debugf("syncing roles")
c.logger.Debug("syncing roles")
if err := c.syncRoles(); err != nil {
c.logger.Errorf("could not sync roles: %v", err)
updateFailed = true
4 changes: 2 additions & 2 deletions pkg/cluster/connection_pooler.go
@@ -591,7 +591,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
// Lack of connection pooler objects is not a fatal error, just log it if
// it was present before in the manifest
if c.ConnectionPooler[role] == nil || role == "" {
c.logger.Debugf("no connection pooler to delete")
c.logger.Debug("no connection pooler to delete")
return nil
}

@@ -622,7 +622,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
// Repeat the same for the service object
service := c.ConnectionPooler[role].Service
if service == nil {
c.logger.Debugf("no connection pooler service object to delete")
c.logger.Debug("no connection pooler service object to delete")
} else {

err = c.KubeClient.
2 changes: 1 addition & 1 deletion pkg/cluster/database.go
@@ -111,7 +111,7 @@ func (c *Cluster) pgConnectionString(dbname string) string {

func (c *Cluster) databaseAccessDisabled() bool {
if !c.OpConfig.EnableDBAccess {
c.logger.Debugf("database access is disabled")
c.logger.Debug("database access is disabled")
}

return !c.OpConfig.EnableDBAccess
2 changes: 1 addition & 1 deletion pkg/cluster/majorversionupgrade.go
@@ -116,7 +116,7 @@ func (c *Cluster) majorVersionUpgrade() error {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)

c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
c.logger.Debug("checking if the spilo image runs with root or non-root (check for user id=0)")
resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
if errIdCheck != nil {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)
10 changes: 5 additions & 5 deletions pkg/cluster/pod.go
@@ -59,7 +59,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
return nil
}

c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg)
c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg)
flag := make(map[string]string)
flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)

@@ -110,7 +110,7 @@ func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
}

func (c *Cluster) deletePods() error {
c.logger.Debugln("deleting pods")
c.logger.Debug("deleting pods")
pods, err := c.listPods()
if err != nil {
return err
@@ -127,9 +127,9 @@ func (c *Cluster) deletePods() error {
}
}
if len(pods) > 0 {
c.logger.Debugln("pods have been deleted")
c.logger.Debug("pods have been deleted")
} else {
c.logger.Debugln("no pods to delete")
c.logger.Debug("no pods to delete")
}

return nil
@@ -230,7 +230,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
}
if !eol {
c.logger.Debugf("no action needed: master pod is already on a live node")
c.logger.Debug("no action needed: master pod is already on a live node")
return nil
}

17 changes: 9 additions & 8 deletions pkg/cluster/resources.go
@@ -187,7 +187,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
c.logger.Warningf("could not scale down: %v", err)
}
}
c.logger.Debugf("updating statefulset")
c.logger.Debug("updating statefulset")

patchData, err := specPatch(newStatefulSet.Spec)
if err != nil {
@@ -218,7 +218,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
}

statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta)
c.logger.Debugf("replacing statefulset")
c.logger.Debug("replacing statefulset")

// Delete the current statefulset without deleting the pods
deletePropagationPolicy := metav1.DeletePropagationOrphan
@@ -232,7 +232,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
// make sure we clear the stored statefulset status if the subsequent create fails.
c.Statefulset = nil
// wait until the statefulset is truly deleted
c.logger.Debugf("waiting for the statefulset to be deleted")
c.logger.Debug("waiting for the statefulset to be deleted")

err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) {
@@ -266,7 +266,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {

func (c *Cluster) deleteStatefulSet() error {
c.setProcessName("deleting statefulset")
c.logger.Debugln("deleting statefulset")
c.logger.Debug("deleting statefulset")
if c.Statefulset == nil {
c.logger.Debug("there is no statefulset in the cluster")
return nil
@@ -349,7 +349,8 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
}

func (c *Cluster) deleteService(role PostgresRole) error {
c.logger.Debugf("deleting service %s", role)
c.setProcessName("deleting service")
c.logger.Debugf("deleting %s service", role)

if c.Services[role] == nil {
c.logger.Debugf("No service for %s role was found, nothing to delete", role)
@@ -495,7 +496,7 @@ func (c *Cluster) deletePodDisruptionBudget() error {

func (c *Cluster) deleteEndpoint(role PostgresRole) error {
c.setProcessName("deleting endpoint")
c.logger.Debugln("deleting endpoint")
c.logger.Debugf("deleting %s endpoint", role)
if c.Endpoints[role] == nil {
c.logger.Debugf("there is no %s endpoint in the cluster", role)
return nil
@@ -543,7 +544,7 @@ func (c *Cluster) deletePatroniResources() error {

func (c *Cluster) deletePatroniConfigMap(suffix string) error {
c.setProcessName("deleting Patroni config map")
c.logger.Debugln("deleting Patroni config map")
c.logger.Debugf("deleting %s Patroni config map", suffix)
cm := c.PatroniConfigMaps[suffix]
if cm == nil {
c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix)
@@ -565,7 +566,7 @@ func (c *Cluster) deletePatroniConfigMap(suffix string) error {

func (c *Cluster) deletePatroniEndpoint(suffix string) error {
c.setProcessName("deleting Patroni endpoint")
c.logger.Debugln("deleting Patroni endpoint")
c.logger.Debugf("deleting %s Patroni endpoint", suffix)
ep := c.PatroniEndpoints[suffix]
if ep == nil {
c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix)
7 changes: 4 additions & 3 deletions pkg/cluster/streams.go
@@ -46,11 +46,13 @@ func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (p

func (c *Cluster) deleteStream(appId string) error {
c.setProcessName("deleting event stream")
c.logger.Debugf("deleting event stream with applicationId %s", appId)

err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err)
}
c.logger.Infof("event stream %q with applicationId %s has been successfully deleted", c.Streams[appId].Name, appId)
delete(c.Streams, appId)

return nil
@@ -308,7 +310,7 @@ func (c *Cluster) syncStreams() error {

_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("event stream CRD not installed, skipping")
c.logger.Debug("event stream CRD not installed, skipping")
return nil
}

@@ -473,7 +475,7 @@ func (c *Cluster) syncStream(appId string) error {
c.Streams[appId] = stream
}
if match, reason := c.compareStreams(&stream, desiredStreams); !match {
c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
desiredStreams.ObjectMeta = stream.ObjectMeta
updatedStream, err := c.updateStreams(desiredStreams)
if err != nil {
@@ -550,7 +552,6 @@ func (c *Cluster) cleanupRemovedStreams(appIds []string) error {
if err != nil {
errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err))
}
c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId)
}
}

17 changes: 9 additions & 8 deletions pkg/cluster/sync.go
@@ -300,6 +300,7 @@ func (c *Cluster) syncPatroniService() error {
err error
)
serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni)
c.logger.Debugf("syncing %s service", serviceName)
c.setProcessName("syncing %s service", serviceName)

if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil {
@@ -311,7 +312,7 @@ func (c *Cluster) syncPatroniService() error {
c.setProcessName("updating %v service", serviceName)
svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("could not update %s endpoint: %v", serviceName, err)
return fmt.Errorf("could not update %s service: %v", serviceName, err)
}
c.Services[Patroni] = svc
}
@@ -537,7 +538,7 @@ func (c *Cluster) syncStatefulSet() error {
if err != nil {
return fmt.Errorf("could not generate statefulset: %v", err)
}
c.logger.Debugf("syncing statefulsets")
c.logger.Debug("syncing statefulsets")
// check if there are still pods with a rolling update flag
for _, pod := range pods {
if c.getRollingUpdateFlagFromPod(&pod) {
@@ -552,7 +553,7 @@ func (c *Cluster) syncStatefulSet() error {
}

if len(podsToRecreate) > 0 {
c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
c.logger.Infof("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
}

// statefulset is already there, make sure we use its definition in order to compare with the spec.
@@ -658,7 +659,7 @@ func (c *Cluster) syncStatefulSet() error {
// statefulset or those that got their configuration from the outdated statefulset)
if len(podsToRecreate) > 0 {
if isSafeToRecreatePods {
c.logger.Debugln("performing rolling update")
c.logger.Info("performing rolling update")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil {
return fmt.Errorf("could not recreate pods: %v", err)
@@ -971,7 +972,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
// carries the request to change configuration through
for _, pod := range pods {
podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
c.logger.Infof("patching Postgres config via Patroni API on pod %s with following options: %s",
podName, standbyOptionsToSet)
if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil {
return nil
@@ -983,7 +984,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
}

func (c *Cluster) syncSecrets() error {
c.logger.Info("syncing secrets")
c.logger.Debug("syncing secrets")
c.setProcessName("syncing secrets")
generatedSecrets := c.generateUserSecrets()
retentionUsers := make([]string, 0)
@@ -993,7 +994,7 @@ func (c *Cluster) syncSecrets() error {
secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
if err == nil {
c.Secrets[secret.UID] = secret
c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
continue
}
if k8sutil.ResourceAlreadyExists(err) {
@@ -1134,7 +1135,7 @@ func (c *Cluster) updateSecret(
}

if updateSecret {
c.logger.Debugln(updateSecretMsg)
c.logger.Infof(updateSecretMsg)
if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("could not update secret %s: %v", secretName, err)
}
8 changes: 4 additions & 4 deletions pkg/cluster/util.go
@@ -193,7 +193,7 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) {
nice := nicediff.Diff(string(o), string(n), true)
for _, s := range strings.Split(nice, "\n") {
// " is not needed in the value to understand
log.Debugf(strings.ReplaceAll(s, "\"", ""))
log.Debug(strings.ReplaceAll(s, "\"", ""))
}
}

@@ -209,7 +209,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate b
logNiceDiff(c.logger, old.Spec, new.Spec)

if !reflect.DeepEqual(old.Annotations, new.Annotations) {
c.logger.Debugf("metadata.annotation are different")
c.logger.Debug("metadata.annotation are different")
logNiceDiff(c.logger, old.Annotations, new.Annotations)
}

@@ -280,7 +280,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
}

if !c.OpConfig.EnableTeamsAPI {
c.logger.Debugf("team API is disabled")
c.logger.Debug("team API is disabled")
return members, nil
}

@@ -416,7 +416,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
podsNumber = len(pods.Items)
c.logger.Debugf("Waiting for %d pods to become ready", podsNumber)
} else {
c.logger.Debugf("Waiting for any replica pod to become ready")
c.logger.Debug("Waiting for any replica pod to become ready")
}

err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,