
Merge pull request #4764 from chaosi-zju/hpa-e2e
add e2e test for deployment replicas syncer
karmada-bot authored Mar 28, 2024
2 parents 0b19f4d + 8b9e1e2 commit 57c1989
Showing 3 changed files with 229 additions and 58 deletions.
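The deployment-replicas-syncer controller itself is not part of this diff; the three files below only add e2e coverage and test helpers. As a rough mental model of the behavior the new cases assert (a hedged sketch with a hypothetical function name, not the controller's real code): with Divided scheduling and an HPA attached, the Deployment template's spec.replicas converges to the sum of the replicas running in the member clusters, while with Duplicated scheduling the syncer does not rewrite the template.

// Illustrative sketch only; relies on the policyv1alpha1 package imported in the test below.
func desiredTemplateReplicas(schedulingType policyv1alpha1.ReplicaSchedulingType, currentTemplateReplicas int32, memberReplicas []int32) int32 {
	if schedulingType != policyv1alpha1.ReplicaSchedulingTypeDivided {
		// Duplicated: every member runs the full template value, so nothing is synced back.
		return currentTemplateReplicas
	}
	var sum int32
	for _, replicas := range memberReplicas {
		sum += replicas
	}
	return sum
}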
254 changes: 196 additions & 58 deletions test/e2e/deploymentreplicassyncer_test.go
@@ -18,6 +18,7 @@ package e2e

import (
"context"
"sort"
"time"

"github.com/onsi/ginkgo/v2"
@@ -30,81 +31,218 @@ import (
"k8s.io/utils/pointer"

policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/names"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)

var _ = ginkgo.Describe("hpa replicas synchronization testing", func() {
ginkgo.Context("Replicas synchronization testing", func() {
var initReplicas = int32(1)
var policyNamespace, policyName string
var namespace, deploymentName, hpaName string
var deployment *appsv1.Deployment
var hpa *autoscalingv2.HorizontalPodAutoscaler
var policy *policyv1alpha1.PropagationPolicy
var _ = ginkgo.Describe("deployment replicas syncer testing", func() {
var namespace string
var deploymentName, hpaName, policyName, bindingName string
var deployment *appsv1.Deployment
var hpa *autoscalingv2.HorizontalPodAutoscaler
var policy *policyv1alpha1.PropagationPolicy
var targetClusters []string

ginkgo.BeforeEach(func() {
namespace = testNamespace
deploymentName = deploymentNamePrefix + rand.String(RandomStrLength)
hpaName = deploymentName
policyName = deploymentName
bindingName = names.GenerateBindingName(util.DeploymentKind, deploymentName)

// sort member clusters in increasing order
targetClusters = framework.ClusterNames()[0:2]
sort.Strings(targetClusters)

deployment = helper.NewDeployment(namespace, deploymentName)
hpa = helper.NewHPA(namespace, hpaName, deploymentName)
hpa.Spec.MinReplicas = pointer.Int32(2)
policy = helper.NewPropagationPolicy(namespace, policyName, []policyv1alpha1.ResourceSelector{
{APIVersion: deployment.APIVersion, Kind: deployment.Kind, Name: deployment.Name},
{APIVersion: hpa.APIVersion, Kind: hpa.Kind, Name: hpa.Name},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: targetClusters,
},
})
})

ginkgo.JustBeforeEach(func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
framework.CreateHPA(kubeClient, hpa)

ginkgo.DeferCleanup(func() {
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemoveHPA(kubeClient, namespace, hpa.Name)
framework.WaitDeploymentDisappearOnClusters(targetClusters, deployment.Namespace, deployment.Name)
})
})

ginkgo.Context("when policy is Duplicated schedule type", func() {
ginkgo.BeforeEach(func() {
-policyNamespace = testNamespace
-namespace = testNamespace
-policyName = deploymentNamePrefix + rand.String(RandomStrLength)
-deploymentName = policyName
-hpaName = policyName
-
-deployment = helper.NewDeployment(namespace, deploymentName)
-deployment.Spec.Replicas = pointer.Int32(initReplicas)
-hpa = helper.NewHPA(namespace, hpaName, deploymentName)
-hpa.Spec.MinReplicas = pointer.Int32(2)
deployment.Spec.Replicas = pointer.Int32(2)
})

// Case 1: Deployment(replicas=2) | Policy(Duplicated, two clusters) | HPA(minReplicas=2)
// Expected result: HPA scaling does not take effect on the template spec, and manually modifying the spec has no effect on member clusters.
ginkgo.It("general case combining HPA scaling and manual modification in Duplicated type", func() {
ginkgo.By("step1: propagate 2 replicas to each of the two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

-policy = helper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{
-{
-APIVersion: deployment.APIVersion,
-Kind: deployment.Kind,
-Name: deployment.Name,
-},
-{
-APIVersion: hpa.APIVersion,
-Kind: hpa.Kind,
-Name: hpa.Name,
-},
-}, policyv1alpha1.Placement{
-ClusterAffinity: &policyv1alpha1.ClusterAffinity{
-ClusterNames: framework.ClusterNames(),
-},
ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

ginkgo.By("step3: manually add deployment template replicas from 2 to 4", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 4)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
})

ginkgo.By("step4: manually decrease deployment template replicas from 2 to 1", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})
})
})

ginkgo.Context("when policy is Divided schedule type, each cluster have more that one replica", func() {
ginkgo.BeforeEach(func() {
-framework.CreatePropagationPolicy(karmadaClient, policy)
-framework.CreateDeployment(kubeClient, deployment)
-framework.CreateHPA(kubeClient, hpa)
-ginkgo.DeferCleanup(func() {
-framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
-framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
-framework.RemoveHPA(kubeClient, namespace, hpa.Name)
-framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name)
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(4)
})

// Case 2: Deployment(replicas=4) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=2)
// Expected result: HPA scaling takes effect on the template spec, while manual modification does not.
ginkgo.It("general case combining HPA scaling and manual modification in Divided type", func() {
ginkgo.By("step1: propagate 4 replicas to two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
})

ginkgo.By("step2: hpa scale each member cluster replicas from 2 to 3", func() {
framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
})

ginkgo.By("step3: manually add deployment template replicas from 6 to 10", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 10)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
})

ginkgo.By("step4: manually decrease deployment template replicas from 6 to 2", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{3, 3})
assertDeploymentTemplateReplicas(namespace, deploymentName, 6)
})
})
})

ginkgo.It("deployment has been scaled up and synchronized to Karmada", func() {
framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
func(deployment *appsv1.Deployment) bool {
return true
})
ginkgo.Context("when policy is Divided schedule type, one cluster have no replica", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(1)
hpa.Spec.MinReplicas = pointer.Int32(1)
})

// Case 3: Deployment(replicas=1) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
// Expected result: manual modification takes effect on the template spec.
ginkgo.It("0/1 case, manually modify replicas from 1 to 2", func() {
ginkgo.By("step1: propagate 1 replicas to two clusters", func() {
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})

ginkgo.By("step2: manually add deployment template replicas from 1 to 2", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 2)
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})
})
})

ginkgo.Context("when policy is Divided schedule type, remove one cluster's replicas", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(2)
hpa.Spec.MinReplicas = pointer.Int32(1)
})

-framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name,
-func(deployment *appsv1.Deployment) bool {
-return *deployment.Spec.Replicas == initReplicas
// Case 4: Deployment(replicas=2) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=1)
// Expected result: manual modification takes effect on the template spec.
ginkgo.It("0/1 case, manually modify replicas from 2 to 1", func() {
ginkgo.By("step1: propagate 2 replicas to two clusters", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{1, 1})
assertDeploymentTemplateReplicas(namespace, deploymentName, 2)
})

ginkgo.By("step2: manually add deployment template replicas from 2 to 1", func() {
framework.UpdateDeploymentReplicas(kubeClient, deployment, 1)
framework.WaitResourceBindingFitWith(karmadaClient, namespace, bindingName, func(rb *workv1alpha2.ResourceBinding) bool {
return len(rb.Status.AggregatedStatus) == 1
})
assertDeploymentTemplateReplicas(namespace, deploymentName, 1)
})
})
})

ginkgo.Context("when policy is Divided schedule type, propagate 1 replica but hpa minReplicas is 2", func() {
ginkgo.BeforeEach(func() {
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})
deployment.Spec.Replicas = pointer.Int32(1)
hpa.Spec.MinReplicas = pointer.Int32(2)
})

-expectedReplicas := initReplicas
-gomega.Eventually(func() bool {
-deploymentExist, err := kubeClient.AppsV1().Deployments(deployment.Namespace).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
-if err != nil {
-return false
-}
-klog.Infof("got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedReplicas)
-return (*deploymentExist.Spec.Replicas == expectedReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
-}, time.Minute, pollInterval).Should(gomega.Equal(true))
// Case 5: Deployment(replicas=1) | Policy(Divided, two clusters 1:1) | HPA(minReplicas=2)
// Expected result: it will go through the following process:
// 1. deployment.spec.replicas=1, actual replicas in member1:member2 = 1:0
// 2. HPA takes effect in member1, so actual replicas in member1:member2 = 2:0
// 3. deployment template is updated to 2/2
// 4. rescheduling assigns replicas to member1:member2 = 1:1
// 5. member1's replicas are retained, so actual replicas in member1:member2 = 2:1
// 6. HPA takes effect in member2, so replicas become member1:member2 = 2:2
// 7. deployment template is updated to 4/4
ginkgo.It("propagate 1 replica but hpa minReplicas is 2", func() {
assertDeploymentWorkloadReplicas(namespace, deploymentName, targetClusters, []int32{2, 2})
assertDeploymentTemplateReplicas(namespace, deploymentName, 4)
})
})
})

// assertDeploymentWorkloadReplicas asserts that the replicas in each member cluster eventually equal @expectedReplicas
func assertDeploymentWorkloadReplicas(namespace, name string, clusters []string, expectedReplicas []int32) {
gomega.Expect(len(clusters)).Should(gomega.Equal(len(expectedReplicas)))
for i, cluster := range clusters {
if expectedReplicas[i] == 0 {
// a cluster expected to hold zero replicas should have no Deployment at all;
// continue (rather than return) so the remaining clusters are still checked.
framework.WaitDeploymentDisappearOnCluster(cluster, namespace, name)
continue
}
framework.WaitDeploymentPresentOnClustersFitWith([]string{cluster}, namespace, name, func(deployment *appsv1.Deployment) bool {
klog.Infof("in %s cluster, got: %d, expect: %d", cluster, *deployment.Spec.Replicas, expectedReplicas[i])
return *deployment.Spec.Replicas == expectedReplicas[i]
})
}
}

// assertDeploymentTemplateReplicas asserts that the replicas in the template spec eventually equal @expectedSpecReplicas
func assertDeploymentTemplateReplicas(namespace, name string, expectedSpecReplicas int32) {
gomega.Eventually(func() bool {
deploymentExist, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false
}
klog.Infof("template spec replicas, got: %d, expect: %d", *deploymentExist.Spec.Replicas, expectedSpecReplicas)
return (*deploymentExist.Spec.Replicas == expectedSpecReplicas) && (deploymentExist.Generation == deploymentExist.Status.ObservedGeneration)
}, time.Minute, pollInterval).Should(gomega.Equal(true))
}
12 changes: 12 additions & 0 deletions test/e2e/framework/hpa.go
@@ -42,3 +42,15 @@ func RemoveHPA(client kubernetes.Interface, namespace, name string) {
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

// UpdateHPAWithMinReplicas updates the minReplicas field of the given HPA.
func UpdateHPAWithMinReplicas(client kubernetes.Interface, namespace, name string, minReplicas int32) {
ginkgo.By(fmt.Sprintf("Updating HPA(%s/%s)", namespace, name), func() {
newHPA, err := client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

newHPA.Spec.MinReplicas = &minReplicas
_, err = client.AutoscalingV2().HorizontalPodAutoscalers(namespace).Update(context.TODO(), newHPA, metav1.UpdateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
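For reference, the new e2e cases above drive member-cluster scaling through this helper; a typical call (mirroring deploymentreplicassyncer_test.go) looks like:

// Raise minReplicas from 2 to 3; the propagated HPA then scales the Deployment
// in each member cluster up to at least 3 replicas.
framework.UpdateHPAWithMinReplicas(kubeClient, namespace, hpa.Name, 3)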
21 changes: 21 additions & 0 deletions test/helper/policy.go
@@ -17,6 +17,7 @@ limitations under the License.
package helper

import (
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -97,6 +98,26 @@ func NewExplicitPriorityClusterPropagationPolicy(policyName string, rsSelectors
}
}

// NewStaticWeightPolicyStrategy creates a static-weight replica scheduling strategy with the given weights.
// e.g.: @clusters=[member1, member2], @weights=[1, 1] means static weight `member1:member2=1:1`.
func NewStaticWeightPolicyStrategy(clusters []string, weights []int64) *policyv1alpha1.ReplicaSchedulingStrategy {
gomega.Expect(len(clusters)).Should(gomega.Equal(len(weights)))
staticWeightList := make([]policyv1alpha1.StaticClusterWeight, 0)
for i, clusterName := range clusters {
staticWeightList = append(staticWeightList, policyv1alpha1.StaticClusterWeight{
TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{clusterName}},
Weight: weights[i],
})
}
return &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
WeightPreference: &policyv1alpha1.ClusterPreferences{
StaticWeightList: staticWeightList,
},
}
}
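For reference, the e2e cases above plug the returned strategy straight into the policy's placement, for example:

// divide replicas across two member clusters with equal static weight (1:1)
policy.Spec.Placement.ReplicaScheduling = helper.NewStaticWeightPolicyStrategy(targetClusters, []int64{1, 1})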

// NewOverridePolicy will build a OverridePolicy object.
func NewOverridePolicy(namespace, policyName string, rsSelectors []policyv1alpha1.ResourceSelector, clusterAffinity policyv1alpha1.ClusterAffinity, overriders policyv1alpha1.Overriders) *policyv1alpha1.OverridePolicy {
return &policyv1alpha1.OverridePolicy{
