Merge pull request #3791 from jwcesign/add-e2e-cronfhpa
e2e: Add e2e for CronFederatedHPA
karmada-bot authored Jul 20, 2023
2 parents d27faea + 7ac69c1 commit 993bdfa
Showing 8 changed files with 437 additions and 1 deletion.
8 changes: 8 additions & 0 deletions hack/deploy-karmada.sh
@@ -231,6 +231,9 @@ util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_AGGREGATION_APISERVER_LAB
# deploy karmada-search on host cluster
kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml"
util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
# deploy karmada-metrics-adapter on host cluster
kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-metrics-adapter.yaml"
util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_METRICS_ADAPTER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

# install CRD APIs on karmada apiserver.
if ! kubectl config get-contexts "karmada-apiserver" > /dev/null 2>&1;
@@ -260,6 +263,11 @@ kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/ka
# make sure apiservice for v1alpha1.search.karmada.io is Available
util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_SEARCH_LABEL}"

# deploy APIService on karmada apiserver for karmada-metrics-adapter
kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-metrics-adapter-apiservice.yaml"
# make sure apiservice for karmada metrics adapter is Available
util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_METRICS_ADAPTER_LABEL}"

# deploy cluster proxy rbac for admin
kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml"

8 changes: 7 additions & 1 deletion hack/local-up-karmada.sh
@@ -135,6 +135,7 @@ kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_C
kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}"
kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}"
kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}"
kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}"

#step5. install karmada control plane components
"${REPO_ROOT}"/hack/deploy-karmada.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}"
@@ -169,12 +170,17 @@ kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MOD
#step7. deploy karmada agent in pull mode member clusters
"${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"

#step8. deploy metrics-server in member clusters
"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}"
"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}"
"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"

# wait until all member clusters (member1, member2 and member3) are ready
util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_NAME}"
util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_NAME}"
util::wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_NAME}"

#step8. merge temporary kubeconfig of member clusters by kubectl
#step9. merge temporary kubeconfig of member clusters by kubectl
export KUBECONFIG=$(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX} | tr '\n' ':')
kubectl config view --flatten > ${MEMBER_CLUSTER_KUBECONFIG}
rm $(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX})
199 changes: 199 additions & 0 deletions test/e2e/cronfederatedhpa_test.go
@@ -0,0 +1,199 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
"time"

"github.com/onsi/ginkgo/v2"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/utils/pointer"

autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"github.com/karmada-io/karmada/test/e2e/framework"
"github.com/karmada-io/karmada/test/helper"
)

/*
CronFederatedHPA focuses on scaling a FederatedHPA or another resource that has the scale subresource (e.g. Deployment, StatefulSet) on a cron schedule.
Test Case Overview:
case 1:
Scale FederatedHPA.
case 2:
Scale deployment.
case 3:
Test suspend rule in CronFederatedHPA
case 4:
Test unsuspend rule then suspend it in CronFederatedHPA
*/
var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() {
var cronFHPAName, fhpaName, policyName, deploymentName string
var cronFHPA *autoscalingv1alpha1.CronFederatedHPA
var fhpa *autoscalingv1alpha1.FederatedHPA
var deployment *appsv1.Deployment
var policy *policyv1alpha1.PropagationPolicy

ginkgo.BeforeEach(func() {
cronFHPAName = cronFedratedHPANamePrefix + rand.String(RandomStrLength)
fhpaName = federatedHPANamePrefix + rand.String(RandomStrLength)
policyName = deploymentNamePrefix + rand.String(RandomStrLength)
deploymentName = policyName

deployment = helper.NewDeployment(testNamespace, deploymentName)
policy = helper.NewPropagationPolicy(testNamespace, policyName, []policyv1alpha1.ResourceSelector{
{
APIVersion: deployment.APIVersion,
Kind: deployment.Kind,
Name: deploymentName,
},
}, policyv1alpha1.Placement{
ClusterAffinity: &policyv1alpha1.ClusterAffinity{
ClusterNames: framework.ClusterNames(),
},
ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided,
ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
})
})

ginkgo.JustBeforeEach(func() {
framework.CreatePropagationPolicy(karmadaClient, policy)
framework.CreateDeployment(kubeClient, deployment)
ginkgo.DeferCleanup(func() {
framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name)
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
})
})

// case 1: Scale FederatedHPA.
ginkgo.Context("Scale FederatedHPA", func() {
targetMinReplicas := pointer.Int32(2)
targetMaxReplicas := pointer.Int32(100)

ginkgo.BeforeEach(func() {
// */1 * * * * means the rule will be triggered every 1 minute
rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, nil, targetMinReplicas, targetMaxReplicas)
fhpa = helper.NewFederatedHPA(testNamespace, fhpaName, deploymentName)
cronFHPA = helper.NewCronFederatedHPAWithScalingFHPA(testNamespace, cronFHPAName, fhpaName, rule)

framework.CreateFederatedHPA(karmadaClient, fhpa)
})

ginkgo.AfterEach(func() {
framework.RemoveFederatedHPA(karmadaClient, testNamespace, fhpaName)
framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
})

ginkgo.It("Scale FederatedHPA testing", func() {
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*fhpa.Spec.MinReplicas))

// Create CronFederatedHPA to scale FederatedHPA
framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)

// Wait CronFederatedHPA to scale FederatedHPA's minReplicas which will trigger scaling deployment's replicas to minReplicas
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetMinReplicas))
})
})

// case 2. Scale deployment.
ginkgo.Context("Scale Deployment", func() {
targetReplicas := pointer.Int32(4)

ginkgo.BeforeEach(func() {
// */1 * * * * means the rule will be executed every 1 minute
rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, targetReplicas, nil, nil)
cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
})

ginkgo.AfterEach(func() {
framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
})

ginkgo.It("Scale Deployment testing", func() {
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))

// Create CronFederatedHPA to scale Deployment
framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)

framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetReplicas))
})
})

// case 3. Test suspend rule in CronFederatedHPA
ginkgo.Context("Test suspend rule in CronFederatedHPA", func() {
ginkgo.BeforeEach(func() {
// */1 * * * * means the rule will be executed every 1 minute
rule := helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", true, pointer.Int32(30), nil, nil)
cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
})

ginkgo.AfterEach(func() {
framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
})

ginkgo.It("Test suspend rule with CronFederatedHPA", func() {
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))

// Create CronFederatedHPA to scale Deployment
framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)

// */1 * * * * means the rule will be triggered every 1 minute
// So wait for 1m30s and check whether the replicas changed and whether the suspend field works
time.Sleep(time.Minute*1 + time.Second*30)
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
})
})

// case 4. Test unsuspend rule then suspend it in CronFederatedHPA
ginkgo.Context("Test unsuspend rule then suspend it in CronFederatedHPA", func() {
rule := autoscalingv1alpha1.CronFederatedHPARule{}
targetReplicas := pointer.Int32(4)

ginkgo.BeforeEach(func() {
// */1 * * * * means the rule will be executed every 1 minute
rule = helper.NewCronFederatedHPARule("scale-up", "*/1 * * * *", false, targetReplicas, nil, nil)
cronFHPA = helper.NewCronFederatedHPAWithScalingDeployment(testNamespace, cronFHPAName, deploymentName, rule)
})

ginkgo.AfterEach(func() {
framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
})

ginkgo.It("Test unsuspend rule then suspend it in CronFederatedHPA", func() {
// Step 1. Check the initial replicas, which should be 3 (deployment.Spec.Replicas)
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))

// Step 2. Create CronFederatedHPA to scale the Deployment
framework.CreateCronFederatedHPA(karmadaClient, cronFHPA)
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*targetReplicas))

// Step 3. Update replicas back to 3 (deployment.Spec.Replicas)
framework.UpdateDeploymentReplicas(kubeClient, deployment, *deployment.Spec.Replicas)

// Step 4. Suspend rule
rule.Suspend = pointer.Bool(true)
framework.UpdateCronFederatedHPAWithRule(karmadaClient, testNamespace, cronFHPAName, []autoscalingv1alpha1.CronFederatedHPARule{rule})

// Step 5. Check the replicas, which should not be changed
// */1 * * * * means the rule will be triggered every 1 minute
// So wait for 1m30s and check whether the replicas changed and whether the suspend field works
time.Sleep(time.Minute*1 + time.Second*30)
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*deployment.Spec.Replicas))
})
})
})
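The constructors used above — helper.NewCronFederatedHPARule, helper.NewCronFederatedHPAWithScalingFHPA and helper.NewCronFederatedHPAWithScalingDeployment — live in test/helper and are not part of this diff. As a rough, non-authoritative sketch of what they presumably assemble (field names assumed from the autoscaling.karmada.io/v1alpha1 types and from the argument order used in the tests; newCronFHPAScalingDeployment is a hypothetical name), a CronFederatedHPA that rescales a Deployment every minute could be built like this:

package helper // hypothetical package; the real constructors live in test/helper

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"

	autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
)

// newCronFHPAScalingDeployment sketches what NewCronFederatedHPAWithScalingDeployment
// presumably returns: a CronFederatedHPA whose scale target is a Deployment and whose
// single rule pins the replica count on a cron schedule. Field names are assumptions.
func newCronFHPAScalingDeployment(namespace, name, deploymentName string, targetReplicas int32) *autoscalingv1alpha1.CronFederatedHPA {
	rule := autoscalingv1alpha1.CronFederatedHPARule{
		Name:           "scale-up",
		Schedule:       "*/1 * * * *",                 // trigger every minute
		TargetReplicas: pointer.Int32(targetReplicas), // used when scaling a workload directly
		Suspend:        pointer.Bool(false),
		// TargetMinReplicas/TargetMaxReplicas would be set instead when the scale
		// target is a FederatedHPA, as in test case 1 above.
	}
	return &autoscalingv1alpha1.CronFederatedHPA{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Spec: autoscalingv1alpha1.CronFederatedHPASpec{
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       deploymentName,
			},
			Rules: []autoscalingv1alpha1.CronFederatedHPARule{rule},
		},
	}
}

Either way, the tests above only depend on the schedule firing and on TargetReplicas or TargetMinReplicas being honored, which is exactly what WaitDeploymentReplicasFitWith asserts across the member clusters.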
54 changes: 54 additions & 0 deletions test/e2e/framework/cronfederatedhpa.go
@@ -0,0 +1,54 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"context"
"fmt"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// CreateCronFederatedHPA creates a CronFederatedHPA with the karmada client.
func CreateCronFederatedHPA(client karmada.Interface, fhpa *autoscalingv1alpha1.CronFederatedHPA) {
ginkgo.By(fmt.Sprintf("Create CronFederatedHPA(%s/%s)", fhpa.Namespace, fhpa.Name), func() {
_, err := client.AutoscalingV1alpha1().CronFederatedHPAs(fhpa.Namespace).Create(context.TODO(), fhpa, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

// RemoveCronFederatedHPA deletes a CronFederatedHPA with the karmada client.
func RemoveCronFederatedHPA(client karmada.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Remove CronFederatedHPA(%s/%s)", namespace, name), func() {
err := client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

// UpdateCronFederatedHPAWithRule updates the rules of a CronFederatedHPA with the karmada client.
func UpdateCronFederatedHPAWithRule(client karmada.Interface, namespace, name string, rule []autoscalingv1alpha1.CronFederatedHPARule) {
ginkgo.By(fmt.Sprintf("Updating CronFederatedHPA(%s/%s)", namespace, name), func() {
newCronFederatedHPA, err := client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

newCronFederatedHPA.Spec.Rules = rule
_, err = client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Update(context.TODO(), newCronFederatedHPA, metav1.UpdateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
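UpdateCronFederatedHPAWithRule performs a plain get-then-update, so a concurrent write from the CronFederatedHPA controller (for example a status update that bumps the resourceVersion) could make the Update fail with a conflict and abort the spec. A minimal, hypothetical variant using client-go's conflict-retry helper — a sketch of an alternative, not what this commit does:

package framework

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
	karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// updateCronFederatedHPARulesWithRetry is a hypothetical helper that re-reads the
// object and retries the update whenever the API server reports a conflict.
func updateCronFederatedHPARulesWithRetry(client karmada.Interface, namespace, name string, rules []autoscalingv1alpha1.CronFederatedHPARule) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		cur, err := client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		cur.Spec.Rules = rules
		_, err = client.AutoscalingV1alpha1().CronFederatedHPAs(namespace).Update(context.TODO(), cur, metav1.UpdateOptions{})
		return err
	})
}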
22 changes: 22 additions & 0 deletions test/e2e/framework/deployment.go
@@ -223,3 +223,25 @@ func WaitDeploymentGetByClientFitWith(client kubernetes.Interface, namespace, na
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
})
}

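// WaitDeploymentReplicasFitWith waits until the sum of the deployment's spec replicas
// across the given member clusters equals expectReplicas.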
func WaitDeploymentReplicasFitWith(clusters []string, namespace, name string, expectReplicas int) {
ginkgo.By(fmt.Sprintf("Check deployment(%s/%s) replicas fit with expecting", namespace, name), func() {
gomega.Eventually(func() bool {
totalReplicas := 0
for _, cluster := range clusters {
clusterClient := GetClusterClient(cluster)
if clusterClient == nil {
continue
}

dep, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
continue
}
totalReplicas += int(*dep.Spec.Replicas)
}
klog.Infof("The total replicas of deployment(%s/%s) is %d", namespace, name, totalReplicas)
return totalReplicas == expectReplicas
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
})
}
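WaitDeploymentReplicasFitWith sums dep.Spec.Replicas, i.e. the desired replica count propagated to each member cluster, which is enough to prove that the CronFederatedHPA rule fired and the new value reached the members. If a test ever needed to assert on pods that are actually running, a counterpart summing dep.Status.ReadyReplicas could be used — a sketch under that assumption, not part of this commit, reusing GetClusterClient from the same framework package:

// totalReadyReplicas is a hypothetical helper: it counts ready replicas rather than
// desired replicas across the member clusters. It assumes it sits in the same
// test/e2e/framework package, so context, metav1 and GetClusterClient are in scope.
func totalReadyReplicas(clusters []string, namespace, name string) int {
	total := 0
	for _, cluster := range clusters {
		clusterClient := GetClusterClient(cluster)
		if clusterClient == nil {
			continue
		}
		dep, err := clusterClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			continue
		}
		total += int(dep.Status.ReadyReplicas)
	}
	return total
}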
42 changes: 42 additions & 0 deletions test/e2e/framework/federatedhpa.go
@@ -0,0 +1,42 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
"context"
"fmt"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
)

// CreateFederatedHPA creates a FederatedHPA with the karmada client.
func CreateFederatedHPA(client karmada.Interface, fhpa *autoscalingv1alpha1.FederatedHPA) {
ginkgo.By(fmt.Sprintf("Create FederatedHPA(%s/%s)", fhpa.Namespace, fhpa.Name), func() {
_, err := client.AutoscalingV1alpha1().FederatedHPAs(fhpa.Namespace).Create(context.TODO(), fhpa, metav1.CreateOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}

// RemoveFederatedHPA deletes a FederatedHPA with the karmada client.
func RemoveFederatedHPA(client karmada.Interface, namespace, name string) {
ginkgo.By(fmt.Sprintf("Remove FederatedHPA(%s/%s)", namespace, name), func() {
err := client.AutoscalingV1alpha1().FederatedHPAs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})
}
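As with the CronFederatedHPA constructors, helper.NewFederatedHPA(testNamespace, fhpaName, deploymentName) is defined in test/helper and is not shown in this diff. A rough sketch of the object it presumably returns — an HPA-v2-style spec targeting the test Deployment; the field names and the CPU metric are assumptions, not taken from this commit:

package helper // hypothetical; the real NewFederatedHPA lives in test/helper

import (
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"

	autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1"
)

// newFederatedHPA sketches what NewFederatedHPA(namespace, name, deploymentName)
// presumably returns. The spec shape is assumed to mirror autoscaling/v2.
func newFederatedHPA(namespace, name, deploymentName string) *autoscalingv1alpha1.FederatedHPA {
	utilization := int32(80) // assumed CPU utilization target
	return &autoscalingv1alpha1.FederatedHPA{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Spec: autoscalingv1alpha1.FederatedHPASpec{
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "apps/v1",
				Kind:       "Deployment",
				Name:       deploymentName,
			},
			MinReplicas: pointer.Int32(1), // the test waits for this value before the cron rule fires
			MaxReplicas: 10,
			Metrics: []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: corev1.ResourceCPU,
						Target: autoscalingv2.MetricTarget{
							Type:               autoscalingv2.UtilizationMetricType,
							AverageUtilization: &utilization,
						},
					},
				},
			},
		},
	}
}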
2 changes: 2 additions & 0 deletions test/e2e/suite_test.go
@@ -56,6 +56,8 @@ const (
roleBindingNamePrefix = "rolebinding-"
clusterRoleBindingNamePrefix = "clusterrolebinding-"
podDisruptionBudgetNamePrefix = "poddisruptionbudget-"
federatedHPANamePrefix = "fhpa-"
cronFedratedHPANamePrefix = "cronfhpa-"

updateDeploymentReplicas = 2
updateStatefulSetReplicas = 2