From bb4d27c4b3cc7df1d004b5f1a5bd067362708e8f Mon Sep 17 00:00:00 2001
From: Marco
Date: Thu, 22 Aug 2024 09:38:59 -0500
Subject: [PATCH] OCM-10460 | fix: don't overwrite existing autoscaler value
 in edit flow

---
 cmd/edit/autoscaler/autoscaler_suite_test.go |  13 ++
 cmd/edit/autoscaler/cmd.go                   | 177 +++++++----------
 cmd/edit/autoscaler/cmd_test.go              | 190 +++++++++++++++++++
 cmd/edit/cmd.go                              |   5 +-
 pkg/clusterautoscaler/flags.go               |  73 +++++++
 pkg/clusterautoscaler/flags_test.go          |  51 +++++
 6 files changed, 403 insertions(+), 106 deletions(-)
 create mode 100644 cmd/edit/autoscaler/autoscaler_suite_test.go
 create mode 100644 cmd/edit/autoscaler/cmd_test.go
 create mode 100644 pkg/clusterautoscaler/flags_test.go
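
Previously the edit flow prefilled the autoscaler arguments from the existing
cluster autoscaler only in interactive mode, and without checking which flags
the user had set, so an edit could silently overwrite an existing server-side
value or a value passed on the command line. PrefillAutoscalerArgs now always
prefills from the existing autoscaler but skips every field whose flag was
changed on the command line. A minimal, self-contained Go sketch of that guard
pattern, built on pflag's Changed tracking (the prefillInt helper below is
illustrative and not code from this patch):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    // prefillInt copies the server-side value into target only when the
    // user did not set the flag on the command line.
    func prefillInt(flags *pflag.FlagSet, name string, serverValue int, target *int) {
        if !flags.Changed(name) {
            *target = serverValue
        }
    }

    func main() {
        var logVerbosity int
        flags := pflag.NewFlagSet("edit", pflag.ContinueOnError)
        flags.IntVar(&logVerbosity, "log-verbosity", 0, "autoscaler log verbosity")

        // The user explicitly passes 3; the stored autoscaler value is 1.
        _ = flags.Parse([]string{"--log-verbosity", "3"})
        prefillInt(flags, "log-verbosity", 1, &logVerbosity)

        fmt.Println(logVerbosity) // prints 3: the CLI value is preserved
    }
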
diff --git a/cmd/edit/autoscaler/autoscaler_suite_test.go b/cmd/edit/autoscaler/autoscaler_suite_test.go
new file mode 100644
index 0000000000..8277c19605
--- /dev/null
+++ b/cmd/edit/autoscaler/autoscaler_suite_test.go
@@ -0,0 +1,13 @@
+package autoscaler
+
+import (
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+)
+
+func TestAutoscaler(t *testing.T) {
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Edit Autoscaler Suite")
+}
diff --git a/cmd/edit/autoscaler/cmd.go b/cmd/edit/autoscaler/cmd.go
index 052b26b21c..1108503966 100644
--- a/cmd/edit/autoscaler/cmd.go
+++ b/cmd/edit/autoscaler/cmd.go
@@ -17,10 +17,9 @@ limitations under the License.
 package autoscaler
 
 import (
-	"os"
-	"strconv"
+	"context"
+	"fmt"
 
-	commonUtils "github.com/openshift-online/ocm-common/pkg/utils"
 	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 	"github.com/spf13/cobra"
 
@@ -30,15 +29,13 @@ import (
 	"github.com/openshift/rosa/pkg/rosa"
 )
 
-const argsPrefix string = ""
-
-var Cmd = &cobra.Command{
-	Use:     "autoscaler",
-	Aliases: []string{"cluster-autoscaler"},
-	Short:   "Edit the autoscaler of a cluster",
-	Long: "Configuring cluster-wide autoscaling behavior. At least one machine-pool should " +
-		"have autoscaling enabled for the configuration to be active",
-	Example: `  # Interactively edit an autoscaler to a cluster named "mycluster"
+const (
+	argsPrefix = ""
+	use        = "autoscaler"
+	short      = "Edit the autoscaler of a cluster"
+	long       = "Configuring cluster-wide autoscaling behavior. At least one machine-pool should " +
+		"have autoscaling enabled for the configuration to be active"
+	example = `  # Interactively edit an autoscaler to a cluster named "mycluster"
   rosa edit autoscaler --cluster=mycluster --interactive
 
   # Edit a cluster-autoscaler to skip nodes with local storage
@@ -48,118 +45,90 @@ var Cmd = &cobra.Command{
   rosa edit autoscaler --cluster=mycluster --log-verbosity 3
 
   # Edit a cluster-autoscaler with total CPU constraints
-  rosa edit autoscaler --cluster=mycluster --min-cores 10 --max-cores 100`,
-	Run:  run,
-	Args: cobra.NoArgs,
-}
+  rosa edit autoscaler --cluster=mycluster --min-cores 10 --max-cores 100`
+)
+
+var aliases = []string{"cluster-autoscaler"}
 
-var autoscalerArgs *clusterautoscaler.AutoscalerArgs
+func NewEditAutoscalerCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:     use,
+		Aliases: aliases,
+		Short:   short,
+		Long:    long,
+		Example: example,
+		Args:    cobra.NoArgs,
+	}
 
-func init() {
-	flags := Cmd.Flags()
+	flags := cmd.Flags()
 	flags.SortFlags = false
-	ocm.AddClusterFlag(Cmd)
+	ocm.AddClusterFlag(cmd)
 	interactive.AddFlag(flags)
-	autoscalerArgs = clusterautoscaler.AddClusterAutoscalerFlags(Cmd, argsPrefix)
+	autoscalerArgs := clusterautoscaler.AddClusterAutoscalerFlags(cmd, argsPrefix)
+	cmd.Run = rosa.DefaultRunner(rosa.RuntimeWithOCM(), EditAutoscalerRunner(autoscalerArgs))
+	return cmd
 }
 
-func run(cmd *cobra.Command, _ []string) {
-	r := rosa.NewRuntime().WithOCM()
-	defer r.Cleanup()
+func EditAutoscalerRunner(autoscalerArgs *clusterautoscaler.AutoscalerArgs) rosa.CommandRunner {
+	return func(ctx context.Context, r *rosa.Runtime, command *cobra.Command, _ []string) error {
+		clusterKey := r.GetClusterKey()
+		cluster, err := r.OCMClient.GetCluster(clusterKey, r.Creator)
+		if err != nil {
+			return err
+		}
 
-	clusterKey := r.GetClusterKey()
+		if cluster.Hypershift().Enabled() {
+			return fmt.Errorf("Hosted Control Plane clusters do not support cluster-autoscaler configuration")
+		}
 
-	cluster := r.FetchCluster()
+		if cluster.State() != cmv1.ClusterStateReady {
+			return fmt.Errorf("Cluster '%s' is not yet ready. Current state is '%s'", clusterKey, cluster.State())
+		}
 
-	if cluster.Hypershift().Enabled() {
-		r.Reporter.Errorf("Hosted Control Plane clusters do not support cluster-autoscaler configuration")
-		os.Exit(1)
-	}
+		autoscaler, err := r.OCMClient.GetClusterAutoscaler(cluster.ID())
+		if err != nil {
+			return fmt.Errorf("Failed updating autoscaler configuration for cluster '%s': %s",
+				cluster.ID(), err)
+		}
 
-	if cluster.State() != cmv1.ClusterStateReady {
-		r.Reporter.Errorf("Cluster '%s' is not yet ready. Current state is '%s'", clusterKey, cluster.State())
-		os.Exit(1)
-	}
+		if autoscaler == nil {
+			return fmt.Errorf("No autoscaler for cluster '%s' has been found. "+
+				"You should first create it via 'rosa create autoscaler'", clusterKey)
+		}
 
-	autoscaler, err := r.OCMClient.GetClusterAutoscaler(cluster.ID())
-	if err != nil {
-		r.Reporter.Errorf("Failed updating autoscaler configuration for cluster '%s': %s",
-			cluster.ID(), err)
-		os.Exit(1)
-	}
+		if !clusterautoscaler.IsAutoscalerSetViaCLI(command.Flags(), argsPrefix) && !interactive.Enabled() {
+			interactive.Enable()
+			r.Reporter.Infof("Enabling interactive mode")
+		}
 
-	if autoscaler == nil {
-		r.Reporter.Errorf("No autoscaler for cluster '%s' has been found. "+
"+ - "You should first create it via 'rosa create autoscaler'", clusterKey) - os.Exit(1) - } + r.Reporter.Debugf("Updating autoscaler for cluster '%s'", clusterKey) - if !clusterautoscaler.IsAutoscalerSetViaCLI(cmd.Flags(), argsPrefix) && !interactive.Enabled() { - interactive.Enable() - r.Reporter.Infof("Enabling interactive mode") - } + autoscalerArgs, err := clusterautoscaler.PrefillAutoscalerArgs(command, autoscalerArgs, autoscaler) + if err != nil { + return fmt.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", + cluster.ID(), err) + } - r.Reporter.Debugf("Updating autoscaler for cluster '%s'", clusterKey) - - if interactive.Enabled() { - // pre-filling the parameters from the existing resource if running in interactive mode - - autoscalerArgs.BalanceSimilarNodeGroups = autoscaler.BalanceSimilarNodeGroups() - autoscalerArgs.SkipNodesWithLocalStorage = autoscaler.SkipNodesWithLocalStorage() - autoscalerArgs.LogVerbosity = autoscaler.LogVerbosity() - autoscalerArgs.MaxPodGracePeriod = autoscaler.MaxPodGracePeriod() - autoscalerArgs.PodPriorityThreshold = autoscaler.PodPriorityThreshold() - autoscalerArgs.IgnoreDaemonsetsUtilization = autoscaler.IgnoreDaemonsetsUtilization() - autoscalerArgs.MaxNodeProvisionTime = autoscaler.MaxNodeProvisionTime() - autoscalerArgs.BalancingIgnoredLabels = autoscaler.BalancingIgnoredLabels() - autoscalerArgs.ResourceLimits.MaxNodesTotal = autoscaler.ResourceLimits().MaxNodesTotal() - autoscalerArgs.ResourceLimits.Cores.Min = autoscaler.ResourceLimits().Cores().Min() - autoscalerArgs.ResourceLimits.Cores.Max = autoscaler.ResourceLimits().Cores().Max() - autoscalerArgs.ResourceLimits.Memory.Min = autoscaler.ResourceLimits().Memory().Min() - autoscalerArgs.ResourceLimits.Memory.Max = autoscaler.ResourceLimits().Memory().Max() - - // be aware we cannot easily pre-load GPU limits from existing configuration, so we'll have to - // request it from scratch when interactive mode is on - - autoscalerArgs.ScaleDown.Enabled = autoscaler.ScaleDown().Enabled() - autoscalerArgs.ScaleDown.UnneededTime = autoscaler.ScaleDown().UnneededTime() - autoscalerArgs.ScaleDown.DelayAfterAdd = autoscaler.ScaleDown().DelayAfterAdd() - autoscalerArgs.ScaleDown.DelayAfterDelete = autoscaler.ScaleDown().DelayAfterDelete() - autoscalerArgs.ScaleDown.DelayAfterFailure = autoscaler.ScaleDown().DelayAfterFailure() - - utilizationThreshold, err := strconv.ParseFloat( - autoscaler.ScaleDown().UtilizationThreshold(), - commonUtils.MaxByteSize, - ) + autoscalerArgs, err = clusterautoscaler.GetAutoscalerOptions(command.Flags(), "", false, autoscalerArgs) if err != nil { - r.Reporter.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", + return fmt.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", cluster.ID(), err) - os.Exit(1) } - autoscalerArgs.ScaleDown.UtilizationThreshold = utilizationThreshold - } - autoscalerArgs, err := clusterautoscaler.GetAutoscalerOptions(cmd.Flags(), "", false, autoscalerArgs) - if err != nil { - r.Reporter.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", - cluster.ID(), err) - os.Exit(1) - } + autoscalerConfig, err := clusterautoscaler.CreateAutoscalerConfig(autoscalerArgs) + if err != nil { + return fmt.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", + cluster.ID(), err) + } - autoscalerConfig, err := clusterautoscaler.CreateAutoscalerConfig(autoscalerArgs) - if err != nil { - r.Reporter.Errorf("Failed updating autoscaler configuration for cluster '%s': 
%s", - cluster.ID(), err) - os.Exit(1) - } + _, err = r.OCMClient.UpdateClusterAutoscaler(cluster.ID(), autoscalerConfig) + if err != nil { + return fmt.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", + cluster.ID(), err) + } - _, err = r.OCMClient.UpdateClusterAutoscaler(cluster.ID(), autoscalerConfig) - if err != nil { - r.Reporter.Errorf("Failed updating autoscaler configuration for cluster '%s': %s", - cluster.ID(), err) - os.Exit(1) + r.Reporter.Infof("Successfully updated autoscaler configuration for cluster '%s'", cluster.ID()) + return nil } - - r.Reporter.Infof("Successfully updated autoscaler configuration for cluster '%s'", cluster.ID()) } diff --git a/cmd/edit/autoscaler/cmd_test.go b/cmd/edit/autoscaler/cmd_test.go new file mode 100644 index 0000000000..d8747a5d8b --- /dev/null +++ b/cmd/edit/autoscaler/cmd_test.go @@ -0,0 +1,190 @@ +package autoscaler + +import ( + "context" + "fmt" + "net/http" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + . "github.com/onsi/gomega/ghttp" + cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + . "github.com/openshift-online/ocm-sdk-go/testing" + + "github.com/openshift/rosa/pkg/clusterautoscaler" + "github.com/openshift/rosa/pkg/output" + "github.com/openshift/rosa/pkg/test" + . "github.com/openshift/rosa/pkg/test" +) + +var _ = Describe("edit autoscaler", func() { + + It("Correctly builds the command", func() { + cmd := NewEditAutoscalerCommand() + Expect(cmd).NotTo(BeNil()) + + Expect(cmd.Use).To(Equal(use)) + Expect(cmd.Short).To(Equal(short)) + Expect(cmd.Long).To(Equal(long)) + Expect(cmd.Run).NotTo(BeNil()) + + Expect(cmd.Flags().Lookup("balance-similar-node-groups")).NotTo(BeNil()) + Expect(cmd.Flags().Lookup("log-verbosity")).NotTo(BeNil()) + Expect(cmd.Flags().Lookup("max-nodes-total")).NotTo(BeNil()) + Expect(cmd.Flags().Lookup("scale-down-enabled")).NotTo(BeNil()) + }) + + Context("Edit Autoscaler Runner", func() { + + var t *TestingRuntime + + BeforeEach(func() { + t = NewTestRuntime() + output.SetOutput("") + }) + + AfterEach(func() { + output.SetOutput("") + }) + + It("Returns an error if the cluster does not exist", func() { + t.ApiServer.AppendHandlers(RespondWithJSON(http.StatusOK, FormatClusterList(make([]*cmv1.Cluster, 0)))) + t.SetCluster("cluster", nil) + + runner := EditAutoscalerRunner(&clusterautoscaler.AutoscalerArgs{}) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + Equal("There is no cluster with identifier or name 'cluster'")) + }) + + It("Returns an error if the cluster is not classic cluster", func() { + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + b := cmv1.HypershiftBuilder{} + b.Enabled(true) + c.Hypershift(&b) + }) + + t.ApiServer.AppendHandlers( + RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.SetCluster("cluster", nil) + + runner := EditAutoscalerRunner(&clusterautoscaler.AutoscalerArgs{}) + err := runner(context.Background(), t.RosaRuntime, nil, nil) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + Equal("Hosted Control Plane clusters do not support cluster-autoscaler configuration")) + }) + + It("Returns an error if the cluster is not ready", func() { + cluster := MockCluster(func(c *cmv1.ClusterBuilder) { + c.State(cmv1.ClusterStateInstalling) + }) + + t.ApiServer.AppendHandlers( + RespondWithJSON( + http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster}))) + t.SetCluster("cluster", nil) + + runner := 
+			err := runner(context.Background(), t.RosaRuntime, nil, nil)
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(
+				Equal("Cluster 'cluster' is not yet ready. Current state is 'installing'"))
+		})
+
+		It("Returns an error if no autoscaler exists for a classic cluster", func() {
+			cluster := MockCluster(func(c *cmv1.ClusterBuilder) {
+				c.State(cmv1.ClusterStateReady)
+			})
+
+			t.ApiServer.AppendHandlers(
+				RespondWithJSON(
+					http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster})))
+			t.ApiServer.AppendHandlers(
+				RespondWithJSON(http.StatusNotFound, "{}"))
+			t.SetCluster("cluster", nil)
+
+			runner := EditAutoscalerRunner(&clusterautoscaler.AutoscalerArgs{})
+			err := runner(context.Background(), t.RosaRuntime, nil, nil)
+
+			Expect(err).To(HaveOccurred())
+			Expect(err.Error()).To(
+				Equal(fmt.Sprintf("No autoscaler for cluster '%s' has been found. "+
+					"You should first create it via 'rosa create autoscaler'", "cluster")))
+		})
+
+		It("Updates Autoscaler", func() {
+			cluster := MockCluster(func(c *cmv1.ClusterBuilder) {
+				c.State(cmv1.ClusterStateReady)
+			})
+
+			autoscaler := test.MockAutoscaler(func(a *cmv1.ClusterAutoscalerBuilder) {
+				a.MaxNodeProvisionTime("10m")
+				a.BalancingIgnoredLabels("foo", "bar")
+				a.PodPriorityThreshold(10)
+				a.LogVerbosity(2)
+				a.MaxPodGracePeriod(10)
+				a.IgnoreDaemonsetsUtilization(true)
+				a.SkipNodesWithLocalStorage(true)
+				a.BalanceSimilarNodeGroups(false)
+
+				sd := &cmv1.AutoscalerScaleDownConfigBuilder{}
+				sd.Enabled(true)
+				sd.DelayAfterFailure("10m")
+				sd.DelayAfterAdd("5m")
+				sd.DelayAfterDelete("20m")
+				sd.UnneededTime("25m")
+				sd.UtilizationThreshold("0.5")
+				a.ScaleDown(sd)
+
+				rl := &cmv1.AutoscalerResourceLimitsBuilder{}
+				rl.MaxNodesTotal(10)
+
+				mem := &cmv1.ResourceRangeBuilder{}
+				mem.Max(10).Min(5)
+				rl.Memory(mem)
+
+				cores := &cmv1.ResourceRangeBuilder{}
+				cores.Min(20).Max(30)
+				rl.Cores(cores)
+
+				gpus := &cmv1.AutoscalerResourceLimitsGPULimitBuilder{}
+				gpus.Type("nvidia.com/gpu")
+
+				gpuRR := &cmv1.ResourceRangeBuilder{}
+				gpuRR.Max(20).Min(10)
+				gpus.Range(gpuRR)
+
+				rl.GPUS(gpus)
+				a.ResourceLimits(rl)
+			})
+
+			t.ApiServer.AppendHandlers(
+				RespondWithJSON(
+					http.StatusOK, FormatClusterList([]*cmv1.Cluster{cluster})))
+			t.ApiServer.RouteToHandler(http.MethodGet,
+				fmt.Sprintf("/api/clusters_mgmt/v1/clusters/%s/autoscaler", cluster.ID()),
+				RespondWithJSON(http.StatusOK, FormatResource(autoscaler)))
+			t.ApiServer.RouteToHandler(http.MethodPatch,
+				fmt.Sprintf("/api/clusters_mgmt/v1/clusters/%s/autoscaler", cluster.ID()),
+				CombineHandlers(
+					RespondWithJSON(http.StatusOK, FormatResource(autoscaler)),
+					VerifyJQ(`.log_verbosity`, 1.0),
+					VerifyJQ(`.resource_limits.max_nodes_total`, 20.0),
+					VerifyJQ(`.resource_limits.cores.max`, 30.0),
+				))
+			args := &clusterautoscaler.AutoscalerArgs{}
+			args.LogVerbosity = 1
+			args.ResourceLimits.MaxNodesTotal = 20
+			runner := EditAutoscalerRunner(args)
+			cmd := NewEditAutoscalerCommand()
+			cmd.Flags().Set("log-verbosity", "1")
+			cmd.Flags().Set("max-nodes-total", "20")
+			t.SetCluster("cluster", nil)
+			err := runner(context.Background(), t.RosaRuntime, cmd, nil)
+			Expect(err).NotTo(HaveOccurred())
+		})
+	})
+})
diff --git a/cmd/edit/cmd.go b/cmd/edit/cmd.go
index 96c2964daa..64fd36436a 100644
--- a/cmd/edit/cmd.go
+++ b/cmd/edit/cmd.go
@@ -46,7 +46,8 @@ func init() {
 	Cmd.AddCommand(ingress.Cmd)
 	Cmd.AddCommand(service.Cmd)
 	Cmd.AddCommand(tuningconfigs.Cmd)
-	Cmd.AddCommand(autoscaler.Cmd)
+	autoScaler := autoscaler.NewEditAutoscalerCommand()
+	Cmd.AddCommand(autoScaler)
 
 	kubeletConfig := kubeletconfig.NewEditKubeletConfigCommand()
 	Cmd.AddCommand(kubeletConfig)
@@ -59,7 +60,7 @@ func init() {
 	machinepoolCommand := machinepool.NewEditMachinePoolCommand()
 	Cmd.AddCommand(machinepoolCommand)
 	globallyAvailableCommands := []*cobra.Command{
-		autoscaler.Cmd, addon.Cmd,
+		autoScaler, addon.Cmd,
 		service.Cmd, cluster.Cmd,
 		ingress.Cmd, kubeletConfig,
 		machinepoolCommand, tuningconfigs.Cmd,
diff --git a/pkg/clusterautoscaler/flags.go b/pkg/clusterautoscaler/flags.go
index 1d8d66806d..0d51a3b87a 100644
--- a/pkg/clusterautoscaler/flags.go
+++ b/pkg/clusterautoscaler/flags.go
@@ -5,6 +5,8 @@ import (
 	"strconv"
 	"strings"
 
+	commonUtils "github.com/openshift-online/ocm-common/pkg/utils"
+
 	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
@@ -872,3 +874,74 @@ func getValidMaxRangeValidator(min int) func(interface{}) error {
 		return nil
 	}
 }
+
+// PrefillAutoscalerArgs prefills the autoscaler args from the existing autoscaler in the edit flow
+func PrefillAutoscalerArgs(cmd *cobra.Command, autoscalerArgs *AutoscalerArgs,
+	autoscaler *cmv1.ClusterAutoscaler) (*AutoscalerArgs, error) {
+
+	if !cmd.Flags().Changed(balanceSimilarNodeGroupsFlag) {
+		autoscalerArgs.BalanceSimilarNodeGroups = autoscaler.BalanceSimilarNodeGroups()
+	}
+	if !cmd.Flags().Changed(skipNodesWithLocalStorageFlag) {
+		autoscalerArgs.SkipNodesWithLocalStorage = autoscaler.SkipNodesWithLocalStorage()
+	}
+	if !cmd.Flags().Changed(logVerbosityFlag) {
+		autoscalerArgs.LogVerbosity = autoscaler.LogVerbosity()
+	}
+	if !cmd.Flags().Changed(maxPodGracePeriodFlag) {
+		autoscalerArgs.MaxPodGracePeriod = autoscaler.MaxPodGracePeriod()
+	}
+	if !cmd.Flags().Changed(podPriorityThresholdFlag) {
+		autoscalerArgs.PodPriorityThreshold = autoscaler.PodPriorityThreshold()
+	}
+	if !cmd.Flags().Changed(ignoreDaemonsetsUtilizationFlag) {
+		autoscalerArgs.IgnoreDaemonsetsUtilization = autoscaler.IgnoreDaemonsetsUtilization()
+	}
+	if !cmd.Flags().Changed(maxNodeProvisionTimeFlag) {
+		autoscalerArgs.MaxNodeProvisionTime = autoscaler.MaxNodeProvisionTime()
+	}
+	if !cmd.Flags().Changed(balancingIgnoredLabelsFlag) {
+		autoscalerArgs.BalancingIgnoredLabels = autoscaler.BalancingIgnoredLabels()
+	}
+	if !cmd.Flags().Changed(maxNodesTotalFlag) {
+		autoscalerArgs.ResourceLimits.MaxNodesTotal = autoscaler.ResourceLimits().MaxNodesTotal()
+	}
+	if !cmd.Flags().Changed(minCoresFlag) {
+		autoscalerArgs.ResourceLimits.Cores.Min = autoscaler.ResourceLimits().Cores().Min()
+	}
+	if !cmd.Flags().Changed(maxCoresFlag) {
+		autoscalerArgs.ResourceLimits.Cores.Max = autoscaler.ResourceLimits().Cores().Max()
+	}
+	if !cmd.Flags().Changed(minMemoryFlag) {
+		autoscalerArgs.ResourceLimits.Memory.Min = autoscaler.ResourceLimits().Memory().Min()
+	}
+	if !cmd.Flags().Changed(maxMemoryFlag) {
+		autoscalerArgs.ResourceLimits.Memory.Max = autoscaler.ResourceLimits().Memory().Max()
+	}
+	if !cmd.Flags().Changed(scaleDownEnabledFlag) {
+		autoscalerArgs.ScaleDown.Enabled = autoscaler.ScaleDown().Enabled()
+	}
+	if !cmd.Flags().Changed(scaleDownUnneededTimeFlag) {
+		autoscalerArgs.ScaleDown.UnneededTime = autoscaler.ScaleDown().UnneededTime()
+	}
+	if !cmd.Flags().Changed(scaleDownDelayAfterAddFlag) {
+		autoscalerArgs.ScaleDown.DelayAfterAdd = autoscaler.ScaleDown().DelayAfterAdd()
+	}
+	if !cmd.Flags().Changed(scaleDownDelayAfterDeleteFlag) {
+		autoscalerArgs.ScaleDown.DelayAfterDelete = autoscaler.ScaleDown().DelayAfterDelete()
+	}
+	if !cmd.Flags().Changed(scaleDownDelayAfterFailureFlag) {
+		autoscalerArgs.ScaleDown.DelayAfterFailure = autoscaler.ScaleDown().DelayAfterFailure()
+	}
+	if !cmd.Flags().Changed(scaleDownUtilizationThresholdFlag) {
+		utilizationThreshold, err := strconv.ParseFloat(
+			autoscaler.ScaleDown().UtilizationThreshold(),
+			commonUtils.MaxByteSize,
+		)
+		if err != nil {
+			return autoscalerArgs, err
+		}
+		autoscalerArgs.ScaleDown.UtilizationThreshold = utilizationThreshold
+	}
+	return autoscalerArgs, nil
+}
diff --git a/pkg/clusterautoscaler/flags_test.go b/pkg/clusterautoscaler/flags_test.go
new file mode 100644
index 0000000000..e3a88cb488
--- /dev/null
+++ b/pkg/clusterautoscaler/flags_test.go
@@ -0,0 +1,51 @@
+package clusterautoscaler
+
+import (
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
+	"github.com/spf13/cobra"
+
+	"github.com/openshift/rosa/pkg/test"
+)
+
+var _ = Describe("Cluster Autoscaler flags", func() {
+
+	It("Validates the PrefillAutoscalerArgs function", func() {
+		args := &AutoscalerArgs{}
+		cmd := &cobra.Command{}
+		flags := cmd.Flags()
+		flags.IntVar(
+			&args.ResourceLimits.Cores.Min,
+			"min-cores",
+			0,
+			"Minimum limit for the amount of cores to deploy in the cluster.",
+		)
+
+		flags.IntVar(
+			&args.ResourceLimits.Cores.Max,
+			"max-cores",
+			180*64,
+			"Maximum limit for the amount of cores to deploy in the cluster.",
+		)
+		flags.Set("max-cores", "20")
+		flags.Set("min-cores", "10")
+		autoscaler := test.MockAutoscaler(func(a *cmv1.ClusterAutoscalerBuilder) {
+			sd := &cmv1.AutoscalerScaleDownConfigBuilder{}
+			sd.UtilizationThreshold("0.5")
+			a.ScaleDown(sd)
+
+			rl := &cmv1.AutoscalerResourceLimitsBuilder{}
+			rl.MaxNodesTotal(10)
+			cores := &cmv1.ResourceRangeBuilder{}
+			cores.Min(20).Max(30)
+			rl.Cores(cores)
+			a.ResourceLimits(rl)
+		})
+		prefilledArgs, err := PrefillAutoscalerArgs(cmd, args, autoscaler)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(prefilledArgs.ResourceLimits.MaxNodesTotal).To(Equal(10))
+		Expect(prefilledArgs.ResourceLimits.Cores.Max).To(Equal(20))
+		Expect(prefilledArgs.ResourceLimits.Cores.Min).To(Equal(10))
+	})
+})
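
With this change, a command such as 'rosa edit autoscaler --cluster=mycluster --log-verbosity 1 --max-nodes-total 20' only touches the fields it names. The "Updates Autoscaler" spec above exercises exactly that: log_verbosity and max_nodes_total reach the PATCH payload from the flags, while resource_limits.cores.max keeps the pre-existing value of 30; the flags_test.go spec checks the same precedence for PrefillAutoscalerArgs directly.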