Custom metric-profile list from flag (#111)
Implement a --metrics-profile flag that lets each workload take a custom, comma-separated list of metrics profiles instead of a hard-coded one.

Signed-off-by: Raul Sevilla <[email protected]>
rsevilla87 authored Oct 3, 2024
1 parent 8bc08a1 commit fd4c2d7
Showing 16 changed files with 57 additions and 45 deletions.
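
With this change every workload subcommand, as well as the index subcommand, accepts a --metrics-profile flag holding a comma-separated list of profiles instead of a single hard-coded file. A hypothetical invocation (all flags other than --metrics-profile depend on the target environment), mirroring the bats tests further down:

    kube-burner-ocp cluster-density-v2 --iterations=5 --metrics-profile=metrics-aggregated.yml,custom-metrics.yml
    kube-burner-ocp index --uuid=$(uuidgen) --metrics-profile=custom-metrics.yml
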
4 changes: 3 additions & 1 deletion cluster-density.go
@@ -33,6 +33,7 @@ func NewClusterDensity(wh *workloads.WorkloadHelper, variant string) *cobra.Comm
var churnDelay, churnDuration time.Duration
var churnDeletionStrategy string
var podReadyThreshold time.Duration
var metricsProfiles []string
cmd := &cobra.Command{
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
@@ -60,7 +61,7 @@ func NewClusterDensity(wh *workloads.WorkloadHelper, variant string) *cobra.Comm
log.Errorf("image-registry deployment is not deployed")
}
}
setMetrics(cmd, "metrics-aggregated.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -73,6 +74,7 @@ func NewClusterDensity(wh *workloads.WorkloadHelper, variant string) *cobra.Comm
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().BoolVar(&svcLatency, "service-latency", false, "Enable service latency measurement")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics-aggregated.yml"}, "Comma separated list of metrics profiles to use")
cmd.MarkFlagRequired("iterations")
return cmd
}
7 changes: 2 additions & 5 deletions common.go
@@ -28,16 +28,13 @@ import (

var clusterMetadata ocpmetadata.ClusterMetadata

func setMetrics(cmd *cobra.Command, metricsProfile string) {
var metricsProfiles []string
func setMetrics(cmd *cobra.Command, metricsProfiles []string) {
profileType, _ := cmd.Root().PersistentFlags().GetString("profile-type")
switch ProfileType(profileType) {
case Reporting:
metricsProfiles = []string{"metrics-report.yml"}
case Regular:
metricsProfiles = []string{metricsProfile}
case Both:
metricsProfiles = []string{"metrics-report.yml", metricsProfile}
metricsProfiles = append(metricsProfiles, "metrics-report.yml")
}
os.Setenv("METRICS", strings.Join(metricsProfiles, ","))
}
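
For reference, setMetrics after this hunk should read roughly as follows; this is a sketch reassembled from the visible diff lines, with the unchanged context assumed:

    func setMetrics(cmd *cobra.Command, metricsProfiles []string) {
        profileType, _ := cmd.Root().PersistentFlags().GetString("profile-type")
        switch ProfileType(profileType) {
        case Reporting:
            // reporting-only runs replace whatever profiles the workload passed in
            metricsProfiles = []string{"metrics-report.yml"}
        case Both:
            // keep the user-supplied profiles and add the report profile on top
            metricsProfiles = append(metricsProfiles, "metrics-report.yml")
        }
        os.Setenv("METRICS", strings.Join(metricsProfiles, ","))
    }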
4 changes: 3 additions & 1 deletion crd-scale.go
@@ -25,6 +25,7 @@ import (
// NewCrdScale holds the crd-scale workload
func NewCrdScale(wh *workloads.WorkloadHelper) *cobra.Command {
var iterations int
var metricsProfiles []string
cmd := &cobra.Command{
Use: "crd-scale",
Short: "Runs crd-scale workload",
@@ -33,11 +34,12 @@ func NewCrdScale(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics-aggregated.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
cmd.Flags().IntVar(&iterations, "iterations", 0, "Number of CRDs to create")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics-aggregated.yml"}, "Comma separated list of metrics profiles to use")
cmd.MarkFlagRequired("iterations")
return cmd
}
4 changes: 3 additions & 1 deletion egressip.go
@@ -149,6 +149,7 @@ func NewEgressIP(wh *workloads.WorkloadHelper, variant string) *cobra.Command {
var iterations, addressesPerIteration int
var externalServerIP string
var podReadyThreshold time.Duration
var metricsProfiles []string
cmd := &cobra.Command{
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
@@ -160,14 +161,15 @@ func NewEgressIP(wh *workloads.WorkloadHelper, variant string) *cobra.Command {
generateEgressIPs(iterations, addressesPerIteration, externalServerIP)
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics-egressip.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 2*time.Minute, "Pod ready timeout threshold")
cmd.Flags().IntVar(&iterations, "iterations", 0, fmt.Sprintf("%v iterations", variant))
cmd.Flags().StringVar(&externalServerIP, "external-server-ip", "", "External server IP address")
cmd.Flags().IntVar(&addressesPerIteration, "addresses-per-iteration", 1, fmt.Sprintf("%v iterations", variant))
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics-egressip.yml"}, "Comma separated list of metrics profiles to use")
cmd.MarkFlagRequired("iterations")
cmd.MarkFlagRequired("external-server-ip")
return cmd
9 changes: 3 additions & 6 deletions index.go
@@ -18,7 +18,6 @@ import (
"encoding/json"
"fmt"
"os"
"strings"
"time"

"github.com/cloud-bulldozer/go-commons/indexers"
@@ -35,7 +34,8 @@ import (

// NewIndex orchestrates indexing for ocp wrapper
func NewIndex(metricsEndpoint *string, ocpMetaAgent *ocpmetadata.Metadata) *cobra.Command {
var metricsProfile, jobName string
var jobName string
var metricsProfiles []string
var start, end int64
var userMetadata, metricsDirectory string
var prometheusStep time.Duration
@@ -70,9 +70,6 @@ func NewIndex(metricsEndpoint *string, ocpMetaAgent *ocpmetadata.Metadata) *cobr
log.Fatal("Error obtaining prometheus information from cluster: ", err.Error())
}
}
metricsProfiles := strings.FieldsFunc(metricsProfile, func(r rune) bool {
return r == ',' || r == ' '
})
indexer = config.MetricsEndpoint{
Endpoint: prometheusURL,
Token: prometheusToken,
@@ -148,7 +145,7 @@ func NewIndex(metricsEndpoint *string, ocpMetaAgent *ocpmetadata.Metadata) *cobr
burner.IndexJobSummary([]burner.JobSummary{jobSummary}, indexerValue)
},
}
cmd.Flags().StringVarP(&metricsProfile, "metrics-profile", "m", "metrics.yml", "comma-separated list of metric profiles")
cmd.Flags().StringSliceVarP(&metricsProfiles, "metrics-profile", "m", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
cmd.Flags().StringVar(&metricsDirectory, "metrics-directory", "collected-metrics", "Directory to dump the metrics files in, when using default local indexing")
cmd.Flags().DurationVar(&prometheusStep, "step", 30*time.Second, "Prometheus step size")
cmd.Flags().Int64Var(&start, "start", time.Now().Unix()-3600, "Epoch start time")
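
The manual strings.FieldsFunc parsing removed above becomes redundant because cobra's string-slice flags already split comma-separated input. A minimal standalone sketch (hypothetical demo command, not part of this repository) showing that behaviour:

    package main

    import (
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        var profiles []string
        cmd := &cobra.Command{
            Use: "demo",
            Run: func(cmd *cobra.Command, args []string) {
                fmt.Println(profiles) // [metrics.yml custom-metrics.yml]
            },
        }
        cmd.Flags().StringSliceVar(&profiles, "metrics-profile", []string{"metrics.yml"}, "metrics profiles")
        cmd.SetArgs([]string{"--metrics-profile", "metrics.yml,custom-metrics.yml"})
        _ = cmd.Execute()
    }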
4 changes: 3 additions & 1 deletion networkpolicy.go
@@ -29,6 +29,7 @@ func NewNetworkPolicy(wh *workloads.WorkloadHelper, variant string) *cobra.Comma
var churn bool
var churnDelay, churnDuration time.Duration
var churnDeletionStrategy string
var metricsProfiles []string
cmd := &cobra.Command{
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
@@ -42,7 +43,7 @@ func NewNetworkPolicy(wh *workloads.WorkloadHelper, variant string) *cobra.Comma
os.Setenv("CHURN_DELETION_STRATEGY", churnDeletionStrategy)
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -53,6 +54,7 @@ func NewNetworkPolicy(wh *workloads.WorkloadHelper, variant string) *cobra.Comma
cmd.Flags().DurationVar(&churnDelay, "churn-delay", 2*time.Minute, "Time to wait between each churn")
cmd.Flags().IntVar(&churnPercent, "churn-percent", 10, "Percentage of job iterations that kube-burner will churn each round")
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
cmd.MarkFlagRequired("iterations")
return cmd
}
4 changes: 3 additions & 1 deletion node-density-cni.go
@@ -32,6 +32,7 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
var namespacedIterations, svcLatency bool
var podReadyThreshold time.Duration
var iterationsPerNamespace int
var metricsProfiles []string
cmd := &cobra.Command{
Use: "node-density-cni",
Short: "Runs node-density-cni workload",
@@ -49,7 +50,7 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("SVC_LATENCY", strconv.FormatBool(svcLatency))
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -58,5 +59,6 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
cmd.Flags().BoolVar(&namespacedIterations, "namespaced-iterations", true, "Namespaced iterations")
cmd.Flags().IntVar(&iterationsPerNamespace, "iterations-per-namespace", 1000, "Iterations per namespace")
cmd.Flags().BoolVar(&svcLatency, "service-latency", false, "Enable service latency measurement")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
return cmd
}
4 changes: 3 additions & 1 deletion node-density-heavy.go
@@ -31,6 +31,7 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
var podReadyThreshold, probesPeriod time.Duration
var namespacedIterations bool
var iterationsPerNamespace int
var metricsProfiles []string
cmd := &cobra.Command{
Use: "node-density-heavy",
Short: "Runs node-density-heavy workload",
@@ -49,7 +50,7 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("ITERATIONS_PER_NAMESPACE", fmt.Sprint(iterationsPerNamespace))
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -58,5 +59,6 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().BoolVar(&namespacedIterations, "namespaced-iterations", true, "Namespaced iterations")
cmd.Flags().IntVar(&iterationsPerNamespace, "iterations-per-namespace", 1000, "Iterations per namespace")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
return cmd
}
4 changes: 3 additions & 1 deletion node-density.go
@@ -30,6 +30,7 @@ func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
var podsPerNode int
var podReadyThreshold time.Duration
var containerImage string
var metricsProfiles []string
cmd := &cobra.Command{
Use: "node-density",
Short: "Runs node-density workload",
@@ -45,12 +46,13 @@ func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("CONTAINER_IMAGE", containerImage)
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
cmd.Flags().IntVar(&podsPerNode, "pods-per-node", 245, "Pods per node")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 15*time.Second, "Pod ready timeout threshold")
cmd.Flags().StringVar(&containerImage, "container-image", "gcr.io/google_containers/pause:3.1", "Container image")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
return cmd
}
6 changes: 3 additions & 3 deletions pvc-density.go
@@ -39,7 +39,7 @@ var dynamicStorageProvisioners = map[string]string{
func NewPVCDensity(wh *workloads.WorkloadHelper) *cobra.Command {

var iterations int
var storageProvisioners []string
var storageProvisioners, metricsProfiles []string
var claimSize string
var containerImage string
provisioner := "aws"
@@ -64,7 +64,7 @@ func NewPVCDensity(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("STORAGE_PROVISIONER", fmt.Sprint(dynamicStorageProvisioners[provisioner]))
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -73,6 +73,6 @@ func NewPVCDensity(wh *workloads.WorkloadHelper) *cobra.Command {
cmd.Flags().StringVar(&provisioner, "provisioner", provisioner, fmt.Sprintf("[%s]", strings.Join(storageProvisioners, " ")))
cmd.Flags().StringVar(&claimSize, "claim-size", "256Mi", "claim-size=256Mi")
cmd.Flags().StringVar(&containerImage, "container-image", "gcr.io/google_containers/pause:3.1", "Container image")

cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
return cmd
}
9 changes: 9 additions & 0 deletions test/ocp/custom-metrics.yml
@@ -0,0 +1,9 @@
- query: process_resident_memory_bytes{job="prometheus-k8s"}
  metricName: prometheusRSS

- query: irate(process_cpu_seconds_total{job="prometheus-k8s"}[2m]) and on (job) topk(2,avg_over_time(process_cpu_seconds_total{job="prometheus-k8s"}[{{.elapsed}}:]))
  metricName: top2PrometheusCPU

- query: prometheus_build_info
  metricName: prometheusBuildInfo
  instant: true
4 changes: 2 additions & 2 deletions test/ocp/metrics-endpoints.yaml
@@ -1,6 +1,6 @@
- endpoint: http://localhost:9090
metrics:
- metrics.yml
- custom-metrics.yml
alerts:
- alerts.yml
indexer:
@@ -10,7 +10,7 @@
type: opensearch
- endpoint: http://localhost:9090
metrics:
- metrics.yml
- custom-metrics.yml
indexer:
esServers: ["{{.ES_SERVER}}"]
insecureSkipVerify: true
9 changes: 0 additions & 9 deletions test/ocp/metrics.yml

This file was deleted.

22 changes: 11 additions & 11 deletions test/test-ocp.bats
@@ -15,7 +15,7 @@ setup_file() {

setup() {
export UUID; UUID=$(uuidgen)
export COMMON_FLAGS="--es-server=${ES_SERVER} --es-index=${ES_INDEX} --alerting=true --uuid=${UUID} --qps=5 --burst=5"
export COMMON_FLAGS="--es-server=${ES_SERVER} --es-index=${ES_INDEX} --alerting=true --qps=5 --burst=5"
}

teardown() {
@@ -30,12 +30,12 @@ teardown_file() {
}

@test "custom-workload as node-density" {
run_cmd kube-burner-ocp init --config=custom-workload.yml ${COMMON_FLAGS} --metrics-endpoint metrics-endpoints.yaml
run_cmd kube-burner-ocp init --config=custom-workload.yml --metrics-endpoint metrics-endpoints.yaml --uuid=${UUID}
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "node-density: es-indexing=true" {
run_cmd kube-burner-ocp node-density --pods-per-node=75 --pod-ready-threshold=10s ${COMMON_FLAGS}
run_cmd kube-burner-ocp node-density --pods-per-node=75 --pod-ready-threshold=10s --uuid=${UUID} ${COMMON_FLAGS}
check_metric_value etcdVersion jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@@ -45,18 +45,18 @@ teardown_file() {
}

@test "cluster-density-ms: metrics-endpoint=true; es-indexing=true" {
run_cmd kube-burner-ocp cluster-density-ms --iterations=1 --churn=false --metrics-endpoint metrics-endpoints.yaml ${COMMON_FLAGS}
run_cmd kube-burner-ocp cluster-density-ms --iterations=1 --churn=false --metrics-endpoint metrics-endpoints.yaml --uuid=${UUID}
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "cluster-density-v2: profile-type=both; user-metadata=true; es-indexing=true; churning=true; svcLatency=true" {
run_cmd kube-burner-ocp cluster-density-v2 --iterations=5 --churn-duration=1m --churn-delay=5s --profile-type=both ${COMMON_FLAGS} --user-metadata=user-metadata.yml --service-latency
run_cmd kube-burner-ocp cluster-density-v2 --iterations=2 --churn-duration=1m --churn-delay=5s --profile-type=both ${COMMON_FLAGS} --user-metadata=user-metadata.yml --service-latency --uuid=${UUID}
check_metric_value cpu-kubelet jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement svcLatencyMeasurement svcLatencyQuantilesMeasurement etcdVersion
}

@test "cluster-density-v2: churn-deletion-strategy=gvr" {
run_cmd kube-burner-ocp cluster-density-v2 --iterations=2 --churn=true --churn-duration=1m --churn-delay=10s --churn-deletion-strategy=gvr ${COMMON_FLAGS}
check_metric_value etcdVersion jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
@test "cluster-density-v2: churn-deletion-strategy=gvr; custom-metrics=true" {
run_cmd kube-burner-ocp cluster-density-v2 --iterations=2 --churn=true --churn-duration=1m --churn-delay=5s --churn-deletion-strategy=gvr --metrics-profile=custom-metrics.yml ${COMMON_FLAGS} --uuid=${UUID}
check_metric_value prometheusRSS jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "cluster-density-v2: indexing=false; churning=false" {
@@ -76,20 +76,20 @@ teardown_file() {
}

@test "index: local-indexing=true" {
run_cmd kube-burner-ocp index --uuid="${UUID}" --metrics-profile "metrics.yml,metrics.yml"
run_cmd kube-burner-ocp index --uuid=${UUID} --metrics-profile "custom-metrics.yml"
}

@test "index: metrics-endpoints=true; es-indexing=true" {
run_cmd kube-burner-ocp index --uuid="${UUID}" --metrics-endpoint metrics-endpoints.yaml --metrics-profile metrics.yml --es-server=https://search-perfscale-dev-chmf5l4sh66lvxbnadi4bznl3a.us-west-2.es.amazonaws.com:443 --es-index=ripsaw-kube-burner
}

@test "networkpolicy-multitenant" {
run_cmd kube-burner-ocp networkpolicy-multitenant --iterations 5 ${COMMON_FLAGS}
run_cmd kube-burner-ocp networkpolicy-multitenant --iterations 5 ${COMMON_FLAGS} --uuid=${UUID}
}

@test "pvc-density" {
# Since 'aws' is the chosen storage provisioner, this will only execute successfully if the ocp environment is aws
run_cmd kube-burner-ocp pvc-density --iterations=2 --provisioner=aws ${COMMON_FLAGS}
run_cmd kube-burner-ocp pvc-density --iterations=2 --provisioner=aws ${COMMON_FLAGS} --uuid=${UUID}
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

4 changes: 3 additions & 1 deletion udn-density-l3-pods.go
@@ -30,6 +30,7 @@ func NewUDNDensityL3Pods(wh *workloads.WorkloadHelper) *cobra.Command {
var churn bool
var churnDelay, churnDuration, podReadyThreshold time.Duration
var churnDeletionStrategy string
var metricsProfiles []string
cmd := &cobra.Command{
Use: "udn-density-l3-pods",
Short: "Runs node-density-udn workload",
@@ -45,7 +46,7 @@ func NewUDNDensityL3Pods(wh *workloads.WorkloadHelper) *cobra.Command {
os.Setenv("POD_READY_THRESHOLD", fmt.Sprintf("%v", podReadyThreshold))
},
Run: func(cmd *cobra.Command, args []string) {
setMetrics(cmd, "metrics.yml")
setMetrics(cmd, metricsProfiles)
wh.Run(cmd.Name())
},
}
@@ -57,5 +58,6 @@ func NewUDNDensityL3Pods(wh *workloads.WorkloadHelper) *cobra.Command {
cmd.Flags().StringVar(&churnDeletionStrategy, "churn-deletion-strategy", "default", "Churn deletion strategy to use")
cmd.Flags().IntVar(&iterations, "iterations", 0, "Iterations")
cmd.Flags().DurationVar(&podReadyThreshold, "pod-ready-threshold", 1*time.Minute, "Pod ready timeout threshold")
cmd.Flags().StringSliceVar(&metricsProfiles, "metrics-profile", []string{"metrics.yml"}, "Comma separated list of metrics profiles to use")
return cmd
}