From 0d9d3af159f497e51bde6881733fb14b4507b1e2 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Thu, 24 Oct 2024 14:54:14 +0200 Subject: [PATCH 01/28] feat: add initial `testkube devbox` command --- cmd/kubectl-testkube/commands/root.go | 3 + cmd/tcl/devbox-mutating-webhook/main.go | 189 +++++ cmd/tcl/kubectl-testkube/devbox/README.md | 1 + cmd/tcl/kubectl-testkube/devbox/agent.go | 179 ++++ cmd/tcl/kubectl-testkube/devbox/binary.go | 110 +++ cmd/tcl/kubectl-testkube/devbox/cloud.go | 135 +++ cmd/tcl/kubectl-testkube/devbox/cluster.go | 89 ++ cmd/tcl/kubectl-testkube/devbox/command.go | 774 ++++++++++++++++++ cmd/tcl/kubectl-testkube/devbox/forward.go | 263 ++++++ cmd/tcl/kubectl-testkube/devbox/namespace.go | 137 ++++ .../kubectl-testkube/devbox/objectstorage.go | 205 +++++ .../kubectl-testkube/devbox/podinterceptor.go | 330 ++++++++ cmd/tcl/kubectl-testkube/devbox/print.go | 96 +++ cmd/tcl/kubectl-testkube/devbox/walker.go | 30 + go.mod | 7 +- go.sum | 13 +- 16 files changed, 2559 insertions(+), 2 deletions(-) create mode 100644 cmd/tcl/devbox-mutating-webhook/main.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/README.md create mode 100644 cmd/tcl/kubectl-testkube/devbox/agent.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/binary.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/cloud.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/cluster.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/command.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/forward.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/namespace.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/objectstorage.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/podinterceptor.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/print.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/walker.go diff --git a/cmd/kubectl-testkube/commands/root.go b/cmd/kubectl-testkube/commands/root.go index 9e3bc87718..17464b48de 100644 --- 
a/cmd/kubectl-testkube/commands/root.go +++ b/cmd/kubectl-testkube/commands/root.go @@ -16,6 +16,7 @@ import ( "github.com/kubeshop/testkube/cmd/kubectl-testkube/commands/common/validator" "github.com/kubeshop/testkube/cmd/kubectl-testkube/commands/pro" "github.com/kubeshop/testkube/cmd/kubectl-testkube/config" + "github.com/kubeshop/testkube/cmd/tcl/kubectl-testkube/devbox" "github.com/kubeshop/testkube/pkg/telemetry" "github.com/kubeshop/testkube/pkg/ui" ) @@ -65,6 +66,8 @@ func init() { RootCmd.AddCommand(NewDockerCmd()) RootCmd.AddCommand(pro.NewLoginCmd()) + RootCmd.AddCommand(devbox.NewDevBoxCommand()) + RootCmd.SetHelpCommand(NewHelpCmd()) } diff --git a/cmd/tcl/devbox-mutating-webhook/main.go b/cmd/tcl/devbox-mutating-webhook/main.go new file mode 100644 index 0000000000..5f49cd0d44 --- /dev/null +++ b/cmd/tcl/devbox-mutating-webhook/main.go @@ -0,0 +1,189 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/wI2L/jsondiff" + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/testworkflows/testworkflowprocessor/constants" +) + +func main() { + http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + http.HandleFunc("/mutate", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + if r.Header.Get("Content-Type") != "application/json" { + http.Error(w, "invalid content type", http.StatusBadRequest) + return + } + + initImage := os.Args[1] + toolkitImage := os.Args[2] + + buf := new(bytes.Buffer) + buf.ReadFrom(r.Body) + body := buf.Bytes() + + if len(body) == 0 { + http.Error(w, "invalid request", http.StatusBadRequest) + return + } + + var review admissionv1.AdmissionReview + if err := json.Unmarshal(body, &review); err != nil { + http.Error(w, fmt.Sprintf("invalid request: %s", err), http.StatusBadRequest) + return + } + + if review.Request == nil { + http.Error(w, "invalid request: empty", http.StatusBadRequest) + return + } + if review.Request.Kind.Kind != "Pod" { + http.Error(w, fmt.Sprintf("invalid resource: %s", review.Request.Kind.Kind), http.StatusBadRequest) + return + } + + pod := corev1.Pod{} + if err := json.Unmarshal(review.Request.Object.Raw, &pod); err != nil { + http.Error(w, fmt.Sprintf("invalid pod provided: %s", err), http.StatusBadRequest) + return + } + originalPod := pod.DeepCopy() + + // Apply changes + if pod.Labels[constants.ResourceIdLabelName] != "" { + usesToolkit := false + for _, c := range append(pod.Spec.InitContainers, pod.Spec.Containers...) 
{ + if c.Image == toolkitImage { + usesToolkit = true + } + } + + pod.Spec.Volumes = append(pod.Spec.Volumes, corev1.Volume{ + Name: "devbox", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }) + + script := ` + set -e + /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" + /usr/bin/mc cp minio/devbox/binaries/testworkflow-init /.tk-devbox/init + chmod 777 /.tk-devbox/init + chmod +x /.tk-devbox/init + ls -lah /.tk-devbox` + if usesToolkit { + script = ` + set -e + /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" + /usr/bin/mc cp minio/devbox/binaries/testworkflow-init /.tk-devbox/init + /usr/bin/mc cp minio/devbox/binaries/testworkflow-toolkit /.tk-devbox/toolkit + chmod 777 /.tk-devbox/init + chmod 777 /.tk-devbox/toolkit + chmod +x /.tk-devbox/init + chmod +x /.tk-devbox/toolkit + ls -lah /.tk-devbox` + } + + pod.Spec.InitContainers = append([]corev1.Container{{ + Name: "devbox-init", + Image: "minio/mc:latest", + Command: []string{"/bin/sh", "-c"}, + Args: []string{script}, + }}, pod.Spec.InitContainers...) 
+ + // TODO: Handle it better, to not be ambiguous + pod.Annotations[constants.SpecAnnotationName] = strings.ReplaceAll(pod.Annotations[constants.SpecAnnotationName], "\"/toolkit\"", "\"/.tk-devbox/toolkit\"") + pod.Annotations[constants.SpecAnnotationName] = strings.ReplaceAll(pod.Annotations[constants.SpecAnnotationName], "\"/.tktw/toolkit\"", "\"/.tk-devbox/toolkit\"") + + for i := range pod.Spec.InitContainers { + if (pod.Spec.InitContainers[i].Image == toolkitImage || pod.Spec.InitContainers[i].Image == initImage) && pod.Spec.InitContainers[i].Command[0] == "/init" { + pod.Spec.InitContainers[i].Command[0] = "/.tk-devbox/init" + } + if pod.Spec.InitContainers[i].Command[0] == "/.tktw/init" { + pod.Spec.InitContainers[i].Command[0] = "/.tk-devbox/init" + } + } + for i := range pod.Spec.Containers { + if (pod.Spec.Containers[i].Image == toolkitImage || pod.Spec.Containers[i].Image == initImage) && pod.Spec.Containers[i].Command[0] == "/init" { + pod.Spec.Containers[i].Command[0] = "/.tk-devbox/init" + } + if pod.Spec.Containers[i].Command[0] == "/.tktw/init" { + pod.Spec.Containers[i].Command[0] = "/.tk-devbox/init" + } + } + + for i := range pod.Spec.InitContainers { + pod.Spec.InitContainers[i].VolumeMounts = append(pod.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ + Name: "devbox", + MountPath: "/.tk-devbox", + }) + } + for i := range pod.Spec.Containers { + pod.Spec.Containers[i].VolumeMounts = append(pod.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: "devbox", + MountPath: "/.tk-devbox", + }) + } + } + + patch, err := jsondiff.Compare(originalPod, pod) + if err != nil { + http.Error(w, fmt.Sprintf("failed to build patch for changes: %s", err), http.StatusInternalServerError) + return + } + + serializedPatch, err := json.Marshal(patch) + if err != nil { + http.Error(w, fmt.Sprintf("failed to serialize patch for changes: %s", err), http.StatusInternalServerError) + return + } + + review.Response = &admissionv1.AdmissionResponse{ + 
UID: review.Request.UID, + Allowed: true, + PatchType: common.Ptr(admissionv1.PatchTypeJSONPatch), + Patch: serializedPatch, + } + + serializedResponse, err := json.Marshal(review) + if err != nil { + http.Error(w, fmt.Sprintf("cannot marshal result: %s", err), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, "%s", serializedResponse) + }) + + stopSignal := make(chan os.Signal, 1) + signal.Notify(stopSignal, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stopSignal + os.Exit(0) + }() + + fmt.Println("Starting server...") + + panic(http.ListenAndServeTLS(":8443", "/certs/tls.crt", "/certs/tls.key", nil)) +} diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md new file mode 100644 index 0000000000..a83b3b4bbe --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/README.md @@ -0,0 +1 @@ +# Development Box \ No newline at end of file diff --git a/cmd/tcl/kubectl-testkube/devbox/agent.go b/cmd/tcl/kubectl-testkube/devbox/agent.go new file mode 100644 index 0000000000..04b281a95b --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/agent.go @@ -0,0 +1,179 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "errors" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/cloud/client" +) + +type agentObj struct { + clientSet kubernetes.Interface + namespace string + cfg AgentConfig + pod *corev1.Pod + localPort int + localWebPort int +} + +type AgentConfig struct { + AgentImage string + ToolkitImage string + InitImage string +} + +func NewAgent(clientSet kubernetes.Interface, namespace string, cfg AgentConfig) *agentObj { + return &agentObj{ + clientSet: clientSet, + namespace: namespace, + cfg: cfg, + } +} + +func (r *agentObj) Deploy(env client.Environment, cloud *cloudObj) (err error) { + tlsInsecure := "false" + if cloud.AgentInsecure() { + tlsInsecure = "true" + } + r.pod, err = r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-agent", + Labels: map[string]string{ + "testkube.io/devbox": "agent", + }, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: common.Ptr(int64(1)), + Volumes: []corev1.Volume{ + {Name: "tmp", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "nats", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + ServiceAccountName: "devbox-account", + Containers: []corev1.Container{ + { + Name: "server", + Image: r.cfg.AgentImage, + Env: []corev1.EnvVar{ + {Name: "NATS_EMBEDDED", Value: "true"}, + {Name: "APISERVER_PORT", Value: "8088"}, + {Name: "APISERVER_FULLNAME", Value: "devbox-agent"}, + {Name: "DISABLE_TEST_TRIGGERS", Value: "true"}, + {Name: "DISABLE_WEBHOOKS", Value: "true"}, + {Name: "DISABLE_DEPRECATED_TESTS", Value: "true"}, + 
{Name: "TESTKUBE_ANALYTICS_ENABLED", Value: "false"}, + {Name: "TESTKUBE_NAMESPACE", Value: r.namespace}, + {Name: "JOB_SERVICE_ACCOUNT_NAME", Value: "devbox-account"}, + {Name: "TESTKUBE_ENABLE_IMAGE_DATA_PERSISTENT_CACHE", Value: "true"}, + {Name: "TESTKUBE_IMAGE_DATA_PERSISTENT_CACHE_KEY", Value: "testkube-image-cache"}, + {Name: "TESTKUBE_TW_TOOLKIT_IMAGE", Value: r.cfg.ToolkitImage}, + {Name: "TESTKUBE_TW_INIT_IMAGE", Value: r.cfg.InitImage}, + {Name: "TESTKUBE_PRO_API_KEY", Value: env.AgentToken}, + {Name: "TESTKUBE_PRO_ORG_ID", Value: env.OrganizationId}, + {Name: "TESTKUBE_PRO_ENV_ID", Value: env.Id}, + {Name: "TESTKUBE_PRO_URL", Value: cloud.AgentURI()}, + {Name: "TESTKUBE_PRO_TLS_INSECURE", Value: tlsInsecure}, + {Name: "TESTKUBE_PRO_TLS_SKIP_VERIFY", Value: "true"}, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "tmp", MountPath: "/tmp"}, + {Name: "nats", MountPath: "/app/nats"}, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt32(8088), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + + // Create the service + _, err = r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-agent", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "testkube.io/devbox": "agent", + }, + Ports: []corev1.ServicePort{ + { + Name: "api", + Protocol: "TCP", + Port: 8088, + TargetPort: intstr.FromInt32(8088), + }, + }, + }, + }, metav1.CreateOptions{}) + + return +} + +func (r *agentObj) WaitForReady() (err error) { + for { + if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready { + return nil + } + time.Sleep(500 * time.Millisecond) + pods, err := 
r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "testkube.io/devbox=agent", + }) + if err != nil { + return err + } + if len(pods.Items) == 0 { + return errors.New("pod not found") + } + r.pod = &pods.Items[0] + } +} + +func (r *agentObj) IP() string { + if r.pod == nil { + return "" + } + return r.pod.Status.PodIP +} + +func (r *agentObj) ClusterAddress() string { + if r.IP() == "" { + return "" + } + return fmt.Sprintf("devbox-agent:%d", 9000) +} + +func (r *agentObj) Debug() { + PrintHeader("Agent") + if r.ClusterAddress() != "" { + PrintItem("Cluster Address", r.ClusterAddress(), "") + } else { + PrintItem("Cluster Address", "unknown", "") + } +} diff --git a/cmd/tcl/kubectl-testkube/devbox/binary.go b/cmd/tcl/kubectl-testkube/devbox/binary.go new file mode 100644 index 0000000000..e27ed929d3 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/binary.go @@ -0,0 +1,110 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "crypto/sha256" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +type binaryObj struct { + lastHash string + outputPath string + mainFilePath string + os string + arch string + mu sync.Mutex +} + +func NewBinary(mainFilePath, outputPath, os, arch string) *binaryObj { + return &binaryObj{ + mainFilePath: mainFilePath, + outputPath: outputPath, + os: os, + arch: arch, + } +} + +func (b *binaryObj) Hash() string { + return b.lastHash +} + +func (b *binaryObj) Path() string { + return b.outputPath +} + +func (b *binaryObj) Build(ctx context.Context) (hash string, err error) { + b.mu.Lock() + defer b.mu.Unlock() + + cmd := exec.Command( + "go", "build", + "-o", b.outputPath, + fmt.Sprintf("-ldflags=%s", strings.Join([]string{ + "-X github.com/kubeshop/testkube/internal/app/api/v1.SlackBotClientID=", + "-X github.com/kubeshop/testkube/internal/app/api/v1.SlackBotClientSecret=", + "-X github.com/kubeshop/testkube/pkg/telemetry.TestkubeMeasurementID=", + "-X github.com/kubeshop/testkube/pkg/telemetry.TestkubeMeasurementSecret=", + "-X github.com/kubeshop/testkube/internal/pkg/api.Version=dev", + "-X github.com/kubeshop/testkube/internal/pkg/api.Commit=000000000", + }, " ")), + "./main.go", + ) + cmd.Dir = filepath.Dir(b.mainFilePath) + cmd.Env = append(os.Environ(), + fmt.Sprintf("GOOS=%s", b.os), + fmt.Sprintf("GOARCH=%s", b.arch), + ) + r, w := io.Pipe() + cmd.Stdout = w + cmd.Stderr = w + var buf []byte + var bufMu sync.Mutex + go func() { + bufMu.Lock() + defer bufMu.Unlock() + buf, _ = io.ReadAll(r) + }() + + go func() { + <-ctx.Done() + if cmd.Process != nil { + cmd.Process.Kill() + } + }() + + if err = cmd.Run(); err != nil { + w.Close() + bufMu.Lock() + defer bufMu.Unlock() + return "", fmt.Errorf("failed to build: %s: %s", err.Error(), string(buf)) + } + w.Close() + + f, err := 
os.Open(b.outputPath) + if err != nil { + return "", fmt.Errorf("failed to get hash: reading binary: %s", err.Error()) + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return "", fmt.Errorf("failed to get hash: %s", err.Error()) + } + + b.lastHash = fmt.Sprintf("%x", h.Sum(nil)) + return b.lastHash, nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/cloud.go b/cmd/tcl/kubectl-testkube/devbox/cloud.go new file mode 100644 index 0000000000..815b472888 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/cloud.go @@ -0,0 +1,135 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/kubeshop/testkube/cmd/kubectl-testkube/config" + "github.com/kubeshop/testkube/pkg/cloud/client" +) + +type cloudObj struct { + cfg config.CloudContext + envClient *client.EnvironmentsClient + list []client.Environment +} + +func NewCloud(cfg config.CloudContext) (*cloudObj, error) { + if cfg.ApiKey == "" || cfg.OrganizationId == "" || cfg.OrganizationName == "" { + return nil, errors.New("login to the organization first") + } + if strings.HasPrefix(cfg.AgentUri, "https://") { + cfg.AgentUri = strings.TrimPrefix(cfg.AgentUri, "https://") + if !regexp.MustCompile(`:\d+$`).MatchString(cfg.AgentUri) { + cfg.AgentUri += ":443" + } + } else if strings.HasPrefix(cfg.AgentUri, "http://") { + cfg.AgentUri = strings.TrimPrefix(cfg.AgentUri, "http://") + if !regexp.MustCompile(`:\d+$`).MatchString(cfg.AgentUri) { + cfg.AgentUri += ":80" + } + } + // TODO: FIX THAT + if strings.HasPrefix(cfg.AgentUri, "api.") { + cfg.AgentUri = "agent." 
+ strings.TrimPrefix(cfg.AgentUri, "api.") + } + envClient := client.NewEnvironmentsClient(cfg.ApiUri, cfg.ApiKey, cfg.OrganizationId) + obj := &cloudObj{ + cfg: cfg, + envClient: envClient, + } + + err := obj.UpdateList() + if err != nil { + return nil, err + } + return obj, nil +} + +func (c *cloudObj) List() []client.Environment { + return c.list +} + +func (c *cloudObj) ListObsolete() []client.Environment { + obsolete := make([]client.Environment, 0) + for _, env := range c.list { + if !env.Connected { + obsolete = append(obsolete, env) + } + } + return obsolete +} + +func (c *cloudObj) UpdateList() error { + list, err := c.envClient.List() + if err != nil { + return err + } + result := make([]client.Environment, 0) + for i := range list { + if strings.HasPrefix(list[i].Name, "devbox-") { + result = append(result, list[i]) + } + } + c.list = result + return nil +} + +func (c *cloudObj) AgentURI() string { + return c.cfg.AgentUri +} + +func (c *cloudObj) AgentInsecure() bool { + return strings.HasPrefix(c.cfg.ApiUri, "http://") +} + +func (c *cloudObj) ApiURI() string { + return c.cfg.ApiUri +} + +func (c *cloudObj) ApiKey() string { + return c.cfg.ApiKey +} + +func (c *cloudObj) ApiInsecure() bool { + return strings.HasPrefix(c.cfg.ApiUri, "http://") +} + +func (c *cloudObj) DashboardUrl(id, path string) string { + return strings.TrimSuffix(fmt.Sprintf("%s/organization/%s/environment/%s/", c.cfg.UiUri, c.cfg.OrganizationId, id)+strings.TrimPrefix(path, "/"), "/") +} + +func (c *cloudObj) CreateEnvironment(name string) (*client.Environment, error) { + env, err := c.envClient.Create(client.Environment{ + Name: name, + Owner: c.cfg.OrganizationId, + OrganizationId: c.cfg.OrganizationId, + }) + if err != nil { + return nil, err + } + c.list = append(c.list, env) + return &env, nil +} + +func (c *cloudObj) DeleteEnvironment(id string) error { + return c.envClient.Delete(id) +} + +func (c *cloudObj) Debug() { + PrintHeader("Control Plane") + PrintItem("Organization", 
c.cfg.OrganizationName, c.cfg.OrganizationId) + PrintItem("API URL", c.cfg.ApiUri, "") + PrintItem("UI URL", c.cfg.UiUri, "") + PrintItem("Agent Server", c.cfg.AgentUri, "") +} diff --git a/cmd/tcl/kubectl-testkube/devbox/cluster.go b/cmd/tcl/kubectl-testkube/devbox/cluster.go new file mode 100644 index 0000000000..b58f0186d2 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/cluster.go @@ -0,0 +1,89 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "strings" + + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/kubeshop/testkube/pkg/k8sclient" +) + +type clusterObj struct { + cfg *rest.Config + clientSet *kubernetes.Clientset + versionInfo *version.Info +} + +func NewCluster() (*clusterObj, error) { + config, err := rest.InClusterConfig() + if err != nil { + config, err = k8sclient.GetK8sClientConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to get Kubernetes config") + } + } + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, errors.Wrap(err, "failed to create Kubernetes client") + } + info, err := clientSet.ServerVersion() + if err != nil { + return nil, errors.Wrap(err, "failed to get Kubernetes cluster details") + } + + return &clusterObj{ + clientSet: clientSet, + versionInfo: info, + cfg: config, + }, nil +} + +func (c *clusterObj) Debug() { + PrintHeader("Cluster") + PrintItem("Address", c.cfg.Host, "") + PrintItem("Platform", c.versionInfo.Platform, "") + PrintItem("Version", c.versionInfo.GitVersion, "") +} + +func (c *clusterObj) ClientSet() *kubernetes.Clientset { + return c.clientSet +} + +func (c *clusterObj) Config() *rest.Config { 
+ return c.cfg +} + +func (c *clusterObj) Namespace(name string) *namespaceObj { + return NewNamespace(c.clientSet, name) +} + +func (c *clusterObj) ImageRegistry(namespace string) *imageRegistryObj { + return NewImageRegistry(c.clientSet, c.cfg, namespace) +} + +func (c *clusterObj) ObjectStorage(namespace string) *objectStorageObj { + return NewObjectStorage(c.clientSet, c.cfg, namespace) +} + +func (c *clusterObj) PodInterceptor(namespace string) *podInterceptorObj { + return NewPodInterceptor(c.clientSet, c.cfg, namespace) +} + +func (c *clusterObj) OperatingSystem() string { + return strings.Split(c.versionInfo.Platform, "/")[0] +} + +func (c *clusterObj) Architecture() string { + return strings.Split(c.versionInfo.Platform, "/")[1] +} diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go new file mode 100644 index 0000000000..6d0fca3919 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -0,0 +1,774 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/fsnotify/fsnotify" + "github.com/minio/minio-go/v7" + "github.com/pterm/pterm" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + + testworkflowsv1 "github.com/kubeshop/testkube-operator/api/testworkflows/v1" + common2 "github.com/kubeshop/testkube/cmd/kubectl-testkube/commands/common" + "github.com/kubeshop/testkube/cmd/kubectl-testkube/config" + "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/api/v1/client" + "github.com/kubeshop/testkube/pkg/mapper/testworkflows" + "github.com/kubeshop/testkube/pkg/ui" +) + +var ( + workflows []testworkflowsv1.TestWorkflow + templates []testworkflowsv1.TestWorkflowTemplate +) + +func load(filePaths []string) (workflows []testworkflowsv1.TestWorkflow, templates []testworkflowsv1.TestWorkflowTemplate) { + found := map[string]struct{}{} + for _, filePath := range filePaths { + err := filepath.Walk(filePath, func(path string, info fs.FileInfo, err error) error { + if info.IsDir() { + return nil + } + // Ignore already registered file path + if _, ok := found[path]; ok { + return nil + } + // Ignore non-YAML files + if !strings.HasSuffix(path, ".yml") && !strings.HasSuffix(path, ".yaml") { + return nil + } + + // Read the files + found[path] = struct{}{} + + // Parse the YAML file + file, err := os.Open(path) + if err != nil { + fmt.Printf(ui.Red("%s: failed to read: %s\n"), path, err.Error()) + return nil + } + + decoder := yaml.NewDecoder(file) + for { + var obj map[string]interface{} + err := decoder.Decode(&obj) + if errors.Is(err, io.EOF) { + file.Close() + break + } + if err != nil { + fmt.Printf(ui.Red("%s: failed to parse yaml: 
%s\n"), path, err.Error()) + break + } + + if obj["kind"] == nil || !(obj["kind"].(string) == "TestWorkflow" || obj["kind"].(string) == "TestWorkflowTemplate") { + continue + } + + if obj["kind"].(string) == "TestWorkflow" { + bytes, _ := yaml.Marshal(obj) + tw := testworkflowsv1.TestWorkflow{} + err := common.DeserializeCRD(&tw, bytes) + if tw.Name == "" { + continue + } + if err != nil { + fmt.Printf(ui.Red("%s: failed to deserialize TestWorkflow: %s\n"), path, err.Error()) + continue + } + workflows = append(workflows, tw) + } else if obj["kind"].(string) == "TestWorkflowTemplate" { + bytes, _ := yaml.Marshal(obj) + tw := testworkflowsv1.TestWorkflowTemplate{} + err := common.DeserializeCRD(&tw, bytes) + if tw.Name == "" { + continue + } + if err != nil { + fmt.Printf(ui.Red("%s: failed to deserialize TestWorkflowTemplate: %s\n"), path, err.Error()) + continue + } + templates = append(templates, tw) + } + } + file.Close() + return nil + }) + ui.ExitOnError(fmt.Sprintf("Reading '%s'", filePath), err) + } + return +} + +func NewDevBoxCommand() *cobra.Command { + var ( + rawDevboxName string + autoAccept bool + baseAgentImage string + baseInitImage string + baseToolkitImage string + syncResources []string + ) + + ask := func(label string) bool { + if autoAccept { + return true + } + accept, _ := pterm.DefaultInteractiveConfirm.WithDefaultValue(true).Show(label) + return accept + } + + cmd := &cobra.Command{ + Use: "devbox", + Hidden: true, + Aliases: []string{"dev"}, + Run: func(cmd *cobra.Command, args []string) { + devboxName := fmt.Sprintf("devbox-%s", rawDevboxName) + + // Load Testkube configuration + cfg, err := config.Load() + if err != nil { + pterm.Error.Printfln("Failed to load config file: %s", err.Error()) + return + } + cloud, err := NewCloud(cfg.CloudContext) + if err != nil { + pterm.Error.Printfln("Failed to connect to Control Plane: %s", err.Error()) + return + } + + // Print debug data for the Control Plane + cloud.Debug() + + // Detect obsolete 
devbox environments + if obsolete := cloud.ListObsolete(); len(obsolete) > 0 { + if ask(fmt.Sprintf("Should delete %d obsolete devbox environments?", len(obsolete))) { + count := 0 + for _, env := range obsolete { + // TODO: Delete namespaces too + err := cloud.DeleteEnvironment(env.Id) + if err != nil { + pterm.Error.Printfln("Failed to delete obsolete devbox environment (%s): %s", env.Name, err.Error()) + } else { + count++ + } + } + pterm.Success.Printfln("Deleted %d/%d obsolete devbox environments", count, len(obsolete)) + } + } + + // Verify if the User accepts this Kubernetes cluster + if !ask("Should continue with that organization?") { + return + } + + // Connect to Kubernetes cluster + cluster, err := NewCluster() + if err != nil { + pterm.Error.Printfln("Failed to connect to Kubernetes cluster: %s", err.Error()) + return + } + + // Print debug data for the cluster + cluster.Debug() + + // Verify if the User accepts this Kubernetes cluster + if !ask("Should continue with that cluster?") { + return + } + + // Print devbox information + PrintHeader("Development box") + PrintItem("Name", devboxName, "") + + interceptorBinarySource := findFile("cmd/tcl/devbox-mutating-webhook/main.go") + agentBinarySource := findFile("cmd/api-server/main.go") + toolkitBinarySource := findFile("cmd/testworkflow-toolkit/main.go") + initBinarySource := findFile("cmd/testworkflow-init/main.go") + + agentImageSource := findFile("build/api-server/Dockerfile") + toolkitImageSource := findFile("build/testworkflow-toolkit/Dockerfile") + initImageSource := findFile("build/testworkflow-init/Dockerfile") + + if interceptorBinarySource == "" { + pterm.Error.Printfln("Pod Interceptor: source not found in the current tree.") + return + } else { + PrintItem("Pod Interceptor", "build from source", filepath.Dir(interceptorBinarySource)) + } + + if agentBinarySource == "" || agentImageSource == "" { + pterm.Error.Printfln("Agent: source not found in the current tree.") + return + } else { + 
PrintItem("Agent", "build from source", filepath.Dir(agentBinarySource)) + } + + if initBinarySource == "" || initImageSource == "" { + pterm.Error.Printfln("Init Process: source not found in the current tree.") + return + } else { + PrintItem("Init Process", "build from source", filepath.Dir(initBinarySource)) + } + if toolkitBinarySource == "" || toolkitImageSource == "" { + pterm.Error.Printfln("Toolkit: source not found in the current tree.") + return + } else { + PrintItem("Toolkit", "build from source", filepath.Dir(toolkitBinarySource)) + } + + // Create devbox environment + if !ask("Continue creating devbox environment?") { + return + } + + // Configure access objects + ns := cluster.Namespace(devboxName) + storage := cluster.ObjectStorage(devboxName) + interceptor := cluster.PodInterceptor(devboxName) + agent := NewAgent(cluster.ClientSet(), devboxName, AgentConfig{ + AgentImage: baseAgentImage, + ToolkitImage: baseToolkitImage, + InitImage: baseInitImage, + }) + + // Destroying + ctx, ctxCancel := context.WithCancel(context.Background()) + stopSignal := make(chan os.Signal, 1) + signal.Notify(stopSignal, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stopSignal + ctxCancel() + }() + + // Prepare spinners + PrintActionHeader("Setting up...") + endSpinner := PrintSpinner( + "environment", "Creating environment", + "namespace", "Configuring cluster namespace", + "storage", "Deploying object storage", + "storageReady", "Waiting for object storage readiness", + "storageForwarding", "Forwarding object storage ports", + "interceptor", "Building Pod interceptor", + "interceptorDeploy", "Deploying Pod interceptor", + "interceptorReady", "Waiting for Pod interceptor readiness", + "interceptorEnable", "Enabling Pod interceptor", + "agent", "Deploying Agent", + "agentReady", "Waiting for Agent readiness", + ) + + // Create the environment in the organization + env, err := cloud.CreateEnvironment(devboxName) + if err != nil { + endSpinner("environment", err) + 
return + } + endSpinner("environment") + + // Create the namespace + if err = ns.Create(); err != nil { + endSpinner("namespace", err) + return + } + endSpinner("namespace") + + // Deploy object storage + if err = storage.Deploy(); err != nil { + endSpinner("storage", err) + return + } + endSpinner("storage") + + // Wait for object storage readiness + if err = storage.WaitForReady(); err != nil { + endSpinner("storageReady", err) + return + } + endSpinner("storageReady") + + // Wait for object storage port forwarding + if err = storage.Forward(); err != nil { + endSpinner("storageForwarding", err) + return + } + endSpinner("storageForwarding") + + // Building the Pod interceptor + interceptorBinaryFilePath := "/tmp/devbox-pod-interceptor" + interceptorBinary := NewBinary( + interceptorBinarySource, + interceptorBinaryFilePath, + cluster.OperatingSystem(), + cluster.Architecture(), + ) + if _, err = interceptorBinary.Build(ctx); err != nil { + endSpinner("interceptor", err) + return + } + endSpinner("interceptor") + + // Deploying the Pod interceptor + if err = interceptor.Deploy(interceptorBinaryFilePath, baseInitImage, baseToolkitImage); err != nil { + endSpinner("interceptorDeploy", err) + return + } + endSpinner("interceptorDeploy") + + // Wait for Pod interceptor readiness + if err = interceptor.WaitForReady(); err != nil { + endSpinner("interceptorReady", err) + return + } + endSpinner("interceptorReady") + + // Enable Pod interceptor + if err = interceptor.Enable(); err != nil { + endSpinner("interceptorEnable", err) + return + } + endSpinner("interceptorEnable") + + // Deploy Agent + if err = agent.Deploy(*env, cloud); err != nil { + endSpinner("agent", err) + return + } + endSpinner("agent") + + // Wait for Agent readiness + if err = agent.WaitForReady(); err != nil { + endSpinner("agentReady", err) + return + } + endSpinner("agentReady") + + PrintHeader("Environment") + PrintItem("Environment ID", env.Id, "") + PrintItem("Agent Token", env.AgentToken, "") 
+ PrintItem("Dashboard", cloud.DashboardUrl(env.Id, ""), "") + + //imageRegistry.Debug() + storage.Debug() + agent.Debug() + + // CONNECTING TO THE CLOUD + agentBinaryFilePath := filepath.Join(filepath.Dir(agentImageSource), "testkube-api-server") + agentBinary := NewBinary( + agentBinarySource, + agentBinaryFilePath, + cluster.OperatingSystem(), + cluster.Architecture(), + ) + initBinaryFilePath := filepath.Join(filepath.Dir(initImageSource), "testworkflow-init") + initBinary := NewBinary( + initBinarySource, + initBinaryFilePath, + cluster.OperatingSystem(), + cluster.Architecture(), + ) + toolkitBinaryFilePath := filepath.Join(filepath.Dir(toolkitImageSource), "testworkflow-toolkit") + toolkitBinary := NewBinary( + toolkitBinarySource, + toolkitBinaryFilePath, + cluster.OperatingSystem(), + cluster.Architecture(), + ) + + storageClient, err := storage.Connect() + if err != nil { + ui.Fail(fmt.Errorf("failed to connect to the Object Storage: %s", err)) + } + storageClient.CreateBucket(ctx, "devbox") + + buildImages := func(ctx context.Context) (bool, error) { + fmt.Println("Building...") + var errsMu sync.Mutex + errs := make([]error, 0) + agentChanged := false + initChanged := false + toolkitChanged := false + ts := time.Now() + var wg sync.WaitGroup + wg.Add(3) + go func() { + prevHash := agentBinary.Hash() + hash, err := agentBinary.Build(ctx) + if err != nil { + errsMu.Lock() + errs = append(errs, err) + errsMu.Unlock() + } else { + if prevHash != hash { + agentChanged = true + } + } + wg.Done() + }() + go func() { + prevHash := initBinary.Hash() + hash, err := initBinary.Build(ctx) + if err != nil { + errsMu.Lock() + errs = append(errs, err) + errsMu.Unlock() + } else { + if prevHash != hash { + initChanged = true + } + } + wg.Done() + }() + go func() { + prevHash := toolkitBinary.Hash() + hash, err := toolkitBinary.Build(ctx) + if err != nil { + errsMu.Lock() + errs = append(errs, err) + errsMu.Unlock() + } else { + if prevHash != hash { + toolkitChanged = 
true + } + } + wg.Done() + }() + wg.Wait() + + if errors.Is(ctx.Err(), context.Canceled) { + return false, context.Canceled + } + + fmt.Println("Built binaries in", time.Since(ts)) + + if len(errs) == 0 && ctx.Err() == nil && (initChanged || toolkitChanged || agentChanged) { + fmt.Println("Packing...") + ts = time.Now() + count := 0 + if initChanged { + count++ + } + if toolkitChanged { + count++ + } + if agentChanged { + count++ + } + + tarFile, err := os.Create("/tmp/devbox-binaries.tar.gz") + if err != nil { + return false, err + } + tarStream := artifacts.NewTarStream() + var mu sync.Mutex + go func() { + mu.Lock() + io.Copy(tarFile, tarStream) + mu.Unlock() + }() + + if initChanged { + file, err := os.Open(initBinaryFilePath) + if err != nil { + return false, err + } + fileStat, err := file.Stat() + if err != nil { + file.Close() + return false, err + } + tarStream.Add("testworkflow-init", file, fileStat) + file.Close() + } + if toolkitChanged { + file, err := os.Open(toolkitBinaryFilePath) + if err != nil { + return false, err + } + fileStat, err := file.Stat() + if err != nil { + file.Close() + return false, err + } + tarStream.Add("testworkflow-toolkit", file, fileStat) + file.Close() + } + if agentChanged { + file, err := os.Open(agentBinaryFilePath) + if err != nil { + return false, err + } + fileStat, err := file.Stat() + if err != nil { + file.Close() + return false, err + } + tarStream.Add("testkube-api-server", file, fileStat) + file.Close() + } + + tarStream.Close() + mu.Lock() + mu.Unlock() + + fmt.Printf("Packed %d binaries in %s\n", count, time.Since(ts)) + ts = time.Now() + + if ctx.Err() != nil { + return false, nil + } + + fmt.Println("Uploading...") + tarFile, err = os.Open("/tmp/devbox-binaries.tar.gz") + if err != nil { + return false, err + } + defer tarFile.Close() + tarFileStat, err := tarFile.Stat() + if err != nil { + return false, err + } + err = storageClient.SaveFileDirect(ctx, "binaries", "binaries.tar.gz", tarFile, 
tarFileStat.Size(), minio.PutObjectOptions{ + DisableMultipart: true, + ContentEncoding: "gzip", + ContentType: "application/gzip", + UserMetadata: map[string]string{ + "X-Amz-Meta-Snowball-Auto-Extract": "true", + "X-Amz-Meta-Minio-Snowball-Prefix": "binaries", + }, + }) + os.Remove("/tmp/devbox-binaries.tar.gz") + + if count > 0 && ctx.Err() == nil { + fmt.Printf("Uploaded %d binaries in %s\n", count, time.Since(ts)) + } + } + + return initChanged || agentChanged || toolkitChanged, errors.Join(errs...) + } + + buildImages(ctx) + + // Load Test Workflows from file system + if len(syncResources) > 0 { + workflows, templates = load(syncResources) + fmt.Printf("found %d Test Workflows in file system (and %d templates)\n", len(workflows), len(templates)) + } + + // Inject Test Workflows from file system + common2.GetClient(cmd) // refresh token + cloudClient, err := client.GetClient(client.ClientCloud, client.Options{ + Insecure: cloud.AgentInsecure(), + ApiUri: cloud.ApiURI(), + CloudApiKey: cloud.ApiKey(), + CloudOrganization: env.OrganizationId, + CloudEnvironment: env.Id, + CloudApiPathPrefix: fmt.Sprintf("/organizations/%s/environments/%s/agent", env.OrganizationId, env.Id), + }) + if err != nil { + ui.Warn(fmt.Sprintf("failed to connect to cloud: %s", err.Error())) + } else { + var errs atomic.Int32 + queue := make(chan struct{}, 30) + wg := sync.WaitGroup{} + wg.Add(len(templates)) + for _, w := range templates { + go func(w testworkflowsv1.TestWorkflowTemplate) { + queue <- struct{}{} + _, err = cloudClient.CreateTestWorkflowTemplate(testworkflows.MapTestWorkflowTemplateKubeToAPI(w)) + if err != nil { + errs.Add(1) + fmt.Printf("failed to create test workflow template: %s: %s\n", w.Name, err.Error()) + } + <-queue + wg.Done() + }(w) + } + wg.Wait() + fmt.Printf("Uploaded %d/%d templates.\n", len(templates)-int(errs.Load()), len(templates)) + errs.Swap(0) + wg = sync.WaitGroup{} + wg.Add(len(workflows)) + for _, w := range workflows { + go func(w 
testworkflowsv1.TestWorkflow) { + queue <- struct{}{} + _, err = cloudClient.CreateTestWorkflow(testworkflows.MapTestWorkflowKubeToAPI(w)) + if err != nil { + errs.Add(1) + fmt.Printf("failed to create test workflow: %s: %s\n", w.Name, err.Error()) + } + <-queue + wg.Done() + }(w) + } + wg.Wait() + fmt.Printf("Uploaded %d/%d workflows.\n", len(workflows)-int(errs.Load()), len(workflows)) + } + + fsWatcher, err := fsnotify.NewWatcher() + if err != nil { + ui.Fail(err) + } + + var watchFsRecursive func(dirPath string) error + watchFsRecursive = func(dirPath string) error { + if err := fsWatcher.Add(dirPath); err != nil { + return err + } + return filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error { + if err != nil || !d.IsDir() { + return nil + } + if filepath.Base(path)[0] == '.' { + // Ignore dot-files + return nil + } + if path == dirPath { + return nil + } + return watchFsRecursive(path) + }) + } + go func() { + triggerCtx, cancelTrigger := context.WithCancel(ctx) + defer cancelTrigger() + trigger := func(triggerCtx context.Context) { + select { + case <-triggerCtx.Done(): + case <-time.After(300 * time.Millisecond): + changed, err := buildImages(triggerCtx) + if ctx.Err() != nil { + return + } + if err == nil { + if changed { + fmt.Println("Build finished. Changes detected") + } else { + fmt.Println("Build finished. No changes detected") + } + } else { + fmt.Println("Build finished. 
Error:", err.Error()) + } + } + } + for { + select { + case event, ok := <-fsWatcher.Events: + if !ok { + return + } + fileinfo, err := os.Stat(event.Name) + if err != nil { + continue + } + if fileinfo.IsDir() { + if event.Has(fsnotify.Create) { + if err = watchFsRecursive(event.Name); err != nil { + fmt.Println("failed to watch", event.Name) + } + } + continue + } + if !strings.HasSuffix(event.Name, ".go") { + continue + } + if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) && !event.Has(fsnotify.Remove) { + continue + } + fmt.Println("File changed:", event.Name) + + cancelTrigger() + triggerCtx, cancelTrigger = context.WithCancel(ctx) + go trigger(triggerCtx) + case err, ok := <-fsWatcher.Errors: + if !ok { + return + } + fmt.Println("Filesystem watcher error:", err.Error()) + } + } + }() + err = watchFsRecursive(filepath.Clean(toolkitImageSource + "/../../..")) + if err != nil { + ui.Fail(err) + } + defer fsWatcher.Close() + fmt.Println("Watching", filepath.Clean(toolkitImageSource+"/../../.."), "for changes") + + <-ctx.Done() + + // DESTROYING + + PrintActionHeader("Cleaning up...") + endSpinner = PrintSpinner( + "namespace", "Deleting cluster namespace", + "environment", "Deleting environment", + "interceptor", "Deleting interceptor", + ) + + wg := sync.WaitGroup{} + wg.Add(3) + + // Destroy the namespace + go func() { + defer wg.Done() + if err = ns.Destroy(); err != nil { + endSpinner("namespace", err) + } else { + endSpinner("namespace") + } + }() + + // Destroy the environment + go func() { + defer wg.Done() + if err = cloud.DeleteEnvironment(env.Id); err != nil { + endSpinner("environment", err) + } else { + endSpinner("environment") + } + }() + + // Destroy the interceptor + go func() { + defer wg.Done() + if err = interceptor.Disable(); err != nil { + endSpinner("interceptor", err) + } else { + endSpinner("interceptor") + } + }() + + wg.Wait() + }, + } + + cmd.Flags().StringVarP(&rawDevboxName, "name", "n", fmt.Sprintf("%d", 
time.Now().UnixNano()), "devbox name") + cmd.Flags().StringSliceVarP(&syncResources, "sync", "s", nil, "synchronise resources at paths") + cmd.Flags().StringVar(&baseInitImage, "init-image", "kubeshop/testkube-tw-init:latest", "base init image") + cmd.Flags().StringVar(&baseToolkitImage, "toolkit-image", "kubeshop/testkube-tw-toolkit:latest", "base toolkit image") + cmd.Flags().StringVar(&baseAgentImage, "agent-image", "kubeshop/testkube-api-server:latest", "base agent image") + cmd.Flags().BoolVarP(&autoAccept, "yes", "y", false, "auto accept without asking for confirmation") + + return cmd +} diff --git a/cmd/tcl/kubectl-testkube/devbox/forward.go b/cmd/tcl/kubectl-testkube/devbox/forward.go new file mode 100644 index 0000000000..3cc2ae6c6b --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/forward.go @@ -0,0 +1,263 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io" + "math/big" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + + "github.com/kubeshop/testkube/pkg/ui" +) + +func GetFreePort() (port int, err error) { + var a *net.TCPAddr + if a, err = net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { + var l *net.TCPListener + if l, err = net.ListenTCP("tcp", a); err == nil { + defer l.Close() + return l.Addr().(*net.TCPAddr).Port, nil + } + } + return +} + +func ForwardPodPort(config *rest.Config, namespace, podName string, clusterPort, localPort int) error { + middlewarePort, err := GetFreePort() + if err != nil { + return err + } + transport, upgrader, err := spdy.RoundTripperFor(config) + if err != nil { + return err + } + path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName) + hostIP := strings.TrimLeft(config.Host, "https://") + serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP} + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &serverURL) + stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) + out, errOut := new(bytes.Buffer), new(bytes.Buffer) + forwarder, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", middlewarePort, clusterPort)}, stopChan, readyChan, out, errOut) + if err != nil { + return err + } + go func() { + if err = forwarder.ForwardPorts(); err != nil { + ui.Fail(errors.Wrap(err, "failed to forward ports")) + } + fmt.Println("finish forwarding ports") + }() + + // Hack to handle Kubernetes Port Forwarding issue. 
+ // Stream through a different server, to ensure that both connections are fully read, with no broken pipe. + // @see {@link https://github.com/kubernetes/kubernetes/issues/74551} + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", localPort)) + if err != nil { + return err + } + go func() { + defer ln.Close() + for { + conn, err := ln.Accept() + if err == nil { + go func(conn net.Conn) { + defer conn.Close() + open, err := net.Dial("tcp", fmt.Sprintf(":%d", middlewarePort)) + if err != nil { + return + } + defer open.Close() + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(open, conn) + wg.Done() + }() + go func() { + io.Copy(conn, open) + wg.Done() + }() + wg.Wait() + + // Read all before closing + io.ReadAll(conn) + io.ReadAll(open) + }(conn) + } + } + }() + + for range readyChan { + } + go func() { + for { + http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d", localPort), nil) + time.Sleep(1 * time.Second) + } + }() + + return nil +} + +func CreateCertificate(cert x509.Certificate) (rcaPEM, rcrtPEM, rkeyPEM []byte, err error) { + // Build CA + ca := &x509.Certificate{ + SerialNumber: big.NewInt(11111), + Subject: pkix.Name{ + Organization: []string{"Kubeshop"}, + Country: []string{"US"}, + Province: []string{""}, + Locality: []string{"Wilmington"}, + StreetAddress: []string{"Orange St"}, + PostalCode: []string{"19801"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, nil, nil, err + } + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + return nil, nil, nil, err + } + caPEM := new(bytes.Buffer) + pem.Encode(caPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: 
caBytes, + }) + caPrivKeyPEM := new(bytes.Buffer) + pem.Encode(caPrivKeyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey), + }) + + // Build the direct certificate + cert.NotBefore = ca.NotBefore + cert.NotAfter = ca.NotAfter + cert.SerialNumber = big.NewInt(11111) + cert.Subject = ca.Subject + cert.SubjectKeyId = []byte{1, 2, 3, 4, 6} + cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} + cert.KeyUsage = x509.KeyUsageDigitalSignature + + certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return nil, nil, nil, err + } + certBytes, err := x509.CreateCertificate(rand.Reader, &cert, ca, &certPrivKey.PublicKey, caPrivKey) + if err != nil { + return nil, nil, nil, err + } + certPEM := new(bytes.Buffer) + pem.Encode(certPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + certPrivKeyPEM := new(bytes.Buffer) + pem.Encode(certPrivKeyPEM, &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey), + }) + + return caPEM.Bytes(), certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil +} + +func CreateSelfSignedCertificate(tml x509.Certificate) (tls.Certificate, []byte, []byte, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return tls.Certificate{}, nil, nil, err + } + keyPem := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}) + tml.NotBefore = time.Now() + tml.NotAfter = time.Now().AddDate(5, 0, 0) + tml.SerialNumber = big.NewInt(123456) + tml.BasicConstraintsValid = true + cert, err := x509.CreateCertificate(rand.Reader, &tml, &tml, &key.PublicKey, key) + if err != nil { + return tls.Certificate{}, nil, nil, err + } + certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert}) + tlsCert, err := tls.X509KeyPair(certPem, keyPem) + if err != nil { + return tls.Certificate{}, nil, nil, err + } + return tlsCert, certPem, keyPem, nil +} + +func 
ProxySSL(sourcePort, sslPort int) error { + tlsCert, _, _, err := CreateSelfSignedCertificate(x509.Certificate{ + IPAddresses: []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("127.0.0.1")}, + }) + if err != nil { + return err + } + ln, err := tls.Listen("tcp", fmt.Sprintf(":%d", sslPort), &tls.Config{ + Certificates: []tls.Certificate{tlsCert}, + InsecureSkipVerify: true, + }) + if err != nil { + return err + } + go func() { + defer ln.Close() + + for { + conn, err := ln.Accept() + if err == nil { + go func(conn net.Conn) { + defer conn.Close() + open, err := net.Dial("tcp", fmt.Sprintf(":%d", sourcePort)) + if err != nil { + return + } + defer open.Close() + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(open, conn) + wg.Done() + }() + go func() { + io.Copy(conn, open) + wg.Done() + }() + wg.Wait() + + io.ReadAll(conn) + io.ReadAll(open) + }(conn) + } + } + }() + return nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/namespace.go b/cmd/tcl/kubectl-testkube/devbox/namespace.go new file mode 100644 index 0000000000..70673859c7 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/namespace.go @@ -0,0 +1,137 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "strings" + "time" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + + "github.com/kubeshop/testkube/internal/common" +) + +type namespaceObj struct { + clientSet kubernetes.Interface + namespace string + ns *corev1.Namespace +} + +func NewNamespace(clientSet kubernetes.Interface, namespace string) *namespaceObj { + return &namespaceObj{ + clientSet: clientSet, + namespace: namespace, + } +} + +func (n *namespaceObj) ServiceAccountName() string { + return "devbox-account" +} + +func (n *namespaceObj) Create() (err error) { + if n.ns != nil { + return nil + } + + // Create namespace + for { + n.ns, err = n.clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: n.namespace, + Labels: map[string]string{ + "testkube.io/devbox": "namespace", + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + if strings.Contains(err.Error(), "being deleted") { + time.Sleep(200 * time.Millisecond) + continue + } + return errors.Wrap(err, "failed to create namespace") + } + break + } + + // Create service account + serviceAccount, err := n.clientSet.CoreV1().ServiceAccounts(n.namespace).Create(context.Background(), &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{Name: n.ServiceAccountName()}, + }, metav1.CreateOptions{}) + if err != nil { + return errors.Wrap(err, "failed to create service account") + } + + // Create service account role + role, err := n.clientSet.RbacV1().Roles(n.namespace).Create(context.Background(), &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-account-role", + }, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"get", "watch", "list", "create", 
"delete", "deletecollection"}, + APIGroups: []string{"batch"}, + Resources: []string{"jobs"}, + }, + { + Verbs: []string{"get", "watch", "list", "create", "patch", "update", "delete", "deletecollection"}, + APIGroups: []string{""}, + Resources: []string{"pods", "persistentvolumeclaims", "secrets", "configmaps"}, + }, + { + Verbs: []string{"get", "watch", "list"}, + APIGroups: []string{""}, + Resources: []string{"pods/log", "events"}, + }, + { + Verbs: []string{"get", "watch", "list", "create", "patch", "update", "delete", "deletecollection"}, + APIGroups: []string{"testworkflows.testkube.io"}, + Resources: []string{"testworkflows", "testworkflows/status", "testworkflowtemplates"}, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return errors.Wrap(err, "failed to create role binding") + } + + // Create service account role binding + _, err = n.clientSet.RbacV1().RoleBindings(n.namespace).Create(context.Background(), &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "devbox-account-rb"}, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount.Name, + Namespace: n.namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: role.Name, + }, + }, metav1.CreateOptions{}) + + return nil +} + +func (n *namespaceObj) Destroy() error { + err := n.clientSet.CoreV1().Namespaces().Delete(context.Background(), n.namespace, metav1.DeleteOptions{ + GracePeriodSeconds: common.Ptr(int64(0)), + PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), + }) + if k8serrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/cmd/tcl/kubectl-testkube/devbox/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/objectstorage.go new file mode 100644 index 0000000000..4d54232c2a --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/objectstorage.go @@ -0,0 +1,205 @@ +// Copyright 2024 Testkube. 
+// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "errors" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/storage/minio" +) + +type objectStorageObj struct { + clientSet kubernetes.Interface + kubernetesConfig *rest.Config + namespace string + pod *corev1.Pod + localPort int + localWebPort int +} + +func NewObjectStorage(clientSet kubernetes.Interface, kubernetesConfig *rest.Config, namespace string) *objectStorageObj { + return &objectStorageObj{ + clientSet: clientSet, + namespace: namespace, + kubernetesConfig: kubernetesConfig, + } +} + +func (r *objectStorageObj) Deploy() (err error) { + r.pod, err = r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-storage", + Labels: map[string]string{ + "testkube.io/devbox": "storage", + }, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: common.Ptr(int64(1)), + Containers: []corev1.Container{ + { + Name: "minio", + Image: "minio/minio:RELEASE.2024-10-13T13-34-11Z", + Args: []string{"server", "/data", "--console-address", ":9090"}, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt32(9000), + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return err + } + + // Create the service + _, err = r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "devbox-storage", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "testkube.io/devbox": "storage", + }, + Ports: []corev1.ServicePort{ + { + Name: "api", + Protocol: "TCP", + Port: 9000, + TargetPort: intstr.FromInt32(9000), + }, + }, + }, + }, metav1.CreateOptions{}) + + return +} + +func (r *objectStorageObj) WaitForReady() (err error) { + for { + if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready { + return nil + } + time.Sleep(500 * time.Millisecond) + pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "testkube.io/devbox=storage", + }) + if err != nil { + return err + } + if len(pods.Items) == 0 { + return errors.New("pod not found") + } + r.pod = &pods.Items[0] + } +} + +func (r *objectStorageObj) IP() string { + if r.pod == nil { + return "" + } + return r.pod.Status.PodIP +} + +func (r *objectStorageObj) ClusterAddress() string { + ip := r.IP() + if ip == "" { + return "" + } + return fmt.Sprintf("%s:%d", ip, 9000) +} + +func (r *objectStorageObj) LocalAddress() string { + if r.localPort == 0 { + return "" + } + return fmt.Sprintf("0.0.0.0:%d", r.localPort) +} + +func (r *objectStorageObj) LocalWebAddress() string { + if r.localWebPort == 0 { + return "" + } + return fmt.Sprintf("127.0.0.1:%d", r.localWebPort) +} + +func (r *objectStorageObj) Forward() error { + if r.pod == nil { + return errors.New("pod not found") + } + if r.localPort != 0 { + return nil + } + port, err := GetFreePort() + if r.localWebPort != 0 { + return nil + } + webPort, err := GetFreePort() + if err != nil { + return err + } + err = ForwardPodPort(r.kubernetesConfig, r.pod.Namespace, r.pod.Name, 9000, port) + if err != nil { + return err + } + r.localPort = port + err = ForwardPodPort(r.kubernetesConfig, r.pod.Namespace, r.pod.Name, 9090, webPort) + if err != nil { + return err + } + 
r.localWebPort = webPort + return nil +} + +func (r *objectStorageObj) Connect() (*minio.Client, error) { + minioClient := minio.NewClient( + r.LocalAddress(), + "minioadmin", + "minioadmin", + "", + "", + "devbox", + ) + err := minioClient.Connect() + return minioClient, err +} + +func (r *objectStorageObj) Debug() { + PrintHeader("Object Storage") + if r.ClusterAddress() != "" { + PrintItem("Cluster Address", r.ClusterAddress(), "") + } else { + PrintItem("Cluster Address", "unknown", "") + } + if r.LocalAddress() != "" { + PrintItem("Local Address", r.LocalAddress(), "") + } else { + PrintItem("Local Address", "not forwarded", "") + } + if r.LocalWebAddress() != "" { + PrintItem("Console", "http://"+r.LocalWebAddress(), "minioadmin / minioadmin") + } else { + PrintItem("Console", "not forwarded", "") + } +} diff --git a/cmd/tcl/kubectl-testkube/devbox/podinterceptor.go b/cmd/tcl/kubectl-testkube/devbox/podinterceptor.go new file mode 100644 index 0000000000..7cd1cc18a9 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/podinterceptor.go @@ -0,0 +1,330 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "io" + "os" + "sync" + "time" + + "github.com/kballard/go-shellquote" + errors2 "github.com/pkg/errors" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + + "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/testworkflows/testworkflowprocessor/constants" +) + +type podInterceptorObj struct { + clientSet *kubernetes.Clientset + kubernetesConfig *rest.Config + namespace string + pod *corev1.Pod + caPem []byte + localPort int + localSslPort int +} + +func NewPodInterceptor(clientSet *kubernetes.Clientset, kubernetesConfig *rest.Config, namespace string) *podInterceptorObj { + return &podInterceptorObj{ + clientSet: clientSet, + namespace: namespace, + kubernetesConfig: kubernetesConfig, + } +} + +func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) (err error) { + caPem, certPem, keyPem, err := CreateCertificate(x509.Certificate{ + DNSNames: []string{ + fmt.Sprintf("devbox-interceptor.%s", r.namespace), + fmt.Sprintf("devbox-interceptor.%s.svc", r.namespace), + }, + }) + if err != nil { + return err + } + r.caPem = caPem + + // Deploy certificate + _, err = r.clientSet.CoreV1().Secrets(r.namespace).Create(context.Background(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-interceptor-cert", + }, + Data: map[string][]byte{ + "ca.crt": caPem, + "tls.crt": certPem, + "tls.key": keyPem, + }, + }, metav1.CreateOptions{}) + + // Deploy Pod + r.pod, err = 
r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-interceptor", + Labels: map[string]string{ + "testkube.io/devbox": "interceptor", + }, + }, + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: common.Ptr(int64(1)), + Volumes: []corev1.Volume{ + {Name: "server", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "certs", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{ + SecretName: "devbox-interceptor-cert", + }}}, + }, + Containers: []corev1.Container{ + { + Name: "interceptor", + Image: "busybox:1.36.1-musl", + Command: []string{"/bin/sh", "-c", fmt.Sprintf("while [ ! -f /app/server-ready ]; do sleep 1; done\n/app/server %s", shellquote.Join(initImage, toolkitImage))}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "server", MountPath: "/app"}, + {Name: "certs", MountPath: "/certs"}, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt32(8443), + Scheme: corev1.URISchemeHTTPS, + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return + } + + // Create the service + _, err = r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "devbox-interceptor", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "testkube.io/devbox": "interceptor", + }, + Ports: []corev1.ServicePort{ + { + Name: "api", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.FromInt32(8443), + }, + }, + }, + }, metav1.CreateOptions{}) + + // Wait for the container to be started + err = r.WaitForContainerStarted() + if err != nil { + return + } + + // Apply the binary + req := r.clientSet.CoreV1().RESTClient(). + Post(). + Resource("pods"). + Name(r.pod.Name). + Namespace(r.namespace). 
+ SubResource("exec"). + VersionedParams(&corev1.PodExecOptions{ + Container: "interceptor", + Command: []string{"tar", "-xzf", "-", "-C", "/app"}, + Stdin: true, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(r.kubernetesConfig, "POST", req.URL()) + if err != nil { + return errors2.Wrap(err, "failed to create spdy executor") + } + + os.WriteFile("/tmp/flag", []byte{1}, 0777) + flagFile, err := os.Open("/tmp/flag") + if err != nil { + return errors2.Wrap(err, "failed to open flag file") + } + defer flagFile.Close() + flagFileStat, err := flagFile.Stat() + if err != nil { + return + } + + file, err := os.Open(binaryPath) + if err != nil { + return + } + defer file.Close() + fileStat, err := file.Stat() + if err != nil { + return + } + + tarStream := artifacts.NewTarStream() + go func() { + defer tarStream.Close() + tarStream.Add("server", file, fileStat) + tarStream.Add("server-ready", flagFile, flagFileStat) + }() + + reader, writer := io.Pipe() + var buf []byte + var bufMu sync.Mutex + go func() { + bufMu.Lock() + defer bufMu.Unlock() + buf, _ = io.ReadAll(reader) + }() + err = exec.Stream(remotecommand.StreamOptions{ + Stdin: tarStream, + Stdout: writer, + Stderr: writer, + Tty: false, + }) + if err != nil { + writer.Close() + bufMu.Lock() + defer bufMu.Unlock() + return fmt.Errorf("failed to stream: %s: %s", err.Error(), string(buf)) + } + writer.Close() + + return +} + +func (r *podInterceptorObj) WaitForContainerStarted() (err error) { + for { + if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Started != nil && *r.pod.Status.ContainerStatuses[0].Started { + return nil + } + time.Sleep(500 * time.Millisecond) + pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "testkube.io/devbox=interceptor", + }) + if err != nil { + return err + } + if len(pods.Items) == 0 { + return 
errors.New("pod not found") + } + r.pod = &pods.Items[0] + } +} + +func (r *podInterceptorObj) WaitForReady() (err error) { + for { + if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready { + return nil + } + time.Sleep(500 * time.Millisecond) + pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: "testkube.io/devbox=interceptor", + }) + if err != nil { + return err + } + if len(pods.Items) == 0 { + return errors.New("pod not found") + } + r.pod = &pods.Items[0] + } +} + +func (r *podInterceptorObj) Enable() (err error) { + _ = r.Disable() + + _, err = r.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.Background(), &admissionregistrationv1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("devbox-interceptor-webhook-%s", r.namespace), + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{ + { + Name: "devbox.kb.io", + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: "devbox-interceptor", + Namespace: r.namespace, + Path: common.Ptr("/mutate"), + Port: common.Ptr(int32(8443)), + }, + CABundle: r.caPem, + }, + Rules: []admissionregistrationv1.RuleWithOperations{ + { + Rule: admissionregistrationv1.Rule{ + APIGroups: []string{""}, + APIVersions: []string{"v1"}, + Resources: []string{"pods"}, + Scope: common.Ptr(admissionregistrationv1.NamespacedScope), + }, + Operations: []admissionregistrationv1.OperationType{ + admissionregistrationv1.Create, + }, + }, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "kubernetes.io/metadata.name", + Operator: metav1.LabelSelectorOpIn, + Values: []string{r.namespace}, + }, + }, + }, + ObjectSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: constants.ResourceIdLabelName, 
+ Operator: metav1.LabelSelectorOpExists, + }, + }, + }, + SideEffects: common.Ptr(admissionregistrationv1.SideEffectClassNone), + AdmissionReviewVersions: []string{"v1"}, + }, + }, + }, metav1.CreateOptions{}) + return +} + +func (r *podInterceptorObj) Disable() (err error) { + return r.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete( + context.Background(), + fmt.Sprintf("devbox-interceptor-webhook-%s", r.namespace), + metav1.DeleteOptions{}) +} + +func (r *podInterceptorObj) IP() string { + if r.pod == nil { + return "" + } + return r.pod.Status.PodIP +} diff --git a/cmd/tcl/kubectl-testkube/devbox/print.go b/cmd/tcl/kubectl-testkube/devbox/print.go new file mode 100644 index 0000000000..4070480e30 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/print.go @@ -0,0 +1,96 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/gookit/color" + "github.com/pterm/pterm" +) + +const ( + printItemNameLen = 20 +) + +var ( + DefaultSpinner = buildDefaultSpinner() +) + +func buildDefaultSpinner() pterm.SpinnerPrinter { + spinner := *pterm.DefaultSpinner.WithSequence(" ◐ ", " ◓ ", " ◑ ", " ◒ ") + spinner.SuccessPrinter = &pterm.PrefixPrinter{ + MessageStyle: &pterm.ThemeDefault.SuccessMessageStyle, + Prefix: pterm.Prefix{ + Style: &pterm.ThemeDefault.SuccessPrefixStyle, + Text: "✓", + }, + } + spinner.FailPrinter = &pterm.PrefixPrinter{ + MessageStyle: &pterm.ThemeDefault.ErrorMessageStyle, + Prefix: pterm.Prefix{ + Style: &pterm.ThemeDefault.ErrorPrefixStyle, + Text: "×", + }, + } + return spinner +} + +func PrintHeader(content string) { + fmt.Println("\n" + color.Blue.Render(color.Bold.Render(content))) +} + +func PrintActionHeader(content string) { + fmt.Println("\n" + color.Magenta.Render(color.Bold.Render(content))) +} + +func PrintItem(name, value, hint string) { + whitespace := strings.Repeat(" ", printItemNameLen-len(name)) + if hint != "" { + fmt.Printf("%s%s %s %s\n", whitespace, color.Bold.Render(name+":"), value, color.FgDarkGray.Render("("+hint+")")) + } else { + fmt.Printf("%s%s %s\n", whitespace, color.Bold.Render(name+":"), value) + } +} + +func PrintSpinner(nameOrLabel ...string) func(name string, err ...error) { + multi := pterm.DefaultMultiPrinter.WithUpdateDelay(10 * time.Millisecond) + messages := make(map[string]string, len(nameOrLabel)/2) + spinners := make(map[string]*pterm.SpinnerPrinter, len(nameOrLabel)/2) + + for i := 0; i < len(nameOrLabel); i += 2 { + name := nameOrLabel[i] + messages[name] = nameOrLabel[i+1] + spinners[name], _ = DefaultSpinner.WithWriter(multi.NewWriter()).Start(messages[name]) + } + + multi.Start() + + return func(name string, errs ...error) { + if 
spinners[name] == nil || !spinners[name].IsActive { + return + } + err := errors.Join(errs...) + if err == nil { + spinners[name].Success() + } else { + spinners[name].Fail(fmt.Sprintf("%s: %s", messages[name], err.Error())) + } + time.Sleep(10 * time.Millisecond) + for _, spinner := range spinners { + if spinner.IsActive { + return + } + } + multi.Stop() + } +} diff --git a/cmd/tcl/kubectl-testkube/devbox/walker.go b/cmd/tcl/kubectl-testkube/devbox/walker.go new file mode 100644 index 0000000000..27674be0b5 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/walker.go @@ -0,0 +1,30 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devbox + +import ( + "os" + "path/filepath" +) + +func findFile(path string) string { + cwd, _ := os.Getwd() + + // Find near in the tree + current := filepath.Clean(filepath.Join(cwd, "testkube")) + for current != filepath.Clean(filepath.Join(cwd, "..")) { + expected := filepath.Clean(filepath.Join(current, path)) + _, err := os.Stat(expected) + if err == nil { + return expected + } + current = filepath.Dir(current) + } + return "" +} diff --git a/go.mod b/go.mod index 9d2fab4906..af254a6743 100644 --- a/go.mod +++ b/go.mod @@ -18,6 +18,7 @@ require ( github.com/dustinkirkland/golang-petname v0.0.0-20191129215211-8e5a1ed0cff0 github.com/fasthttp/websocket v1.5.0 github.com/fluxcd/pkg/apis/event v0.2.0 + github.com/fsnotify/fsnotify v1.6.0 github.com/gabriel-vasile/mimetype v1.4.1 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 github.com/gofiber/adaptor/v2 v2.1.29 @@ -63,6 +64,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/valyala/fasthttp v1.51.0 github.com/vektah/gqlparser/v2 v2.5.2-0.20230422221642-25e09f9d292d + 
github.com/wI2L/jsondiff v0.6.0 go.mongodb.org/mongo-driver v1.11.3 go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 @@ -119,7 +121,6 @@ require ( github.com/evanphx/json-patch/v5 v5.7.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -192,6 +193,10 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e // indirect + github.com/tidwall/gjson v1.17.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/urfave/cli/v2 v2.24.4 // indirect diff --git a/go.sum b/go.sum index b3f4327d31..6f853661bc 100644 --- a/go.sum +++ b/go.sum @@ -608,8 +608,17 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e h1:BuzhfgfWQbX0dWzYzT1zsORLnHRv3bcRcsaUk0VmXA8= github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e/go.mod h1:/Tnicc6m/lsJE0irFMA0LfIwTBo4QP7A8IfyIv4zZKI= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= 
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -629,6 +638,8 @@ github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RV github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY= github.com/vektah/gqlparser/v2 v2.5.2-0.20230422221642-25e09f9d292d h1:ibuD+jp4yLoOY4w8+5+2fDq0ufJ/noPn/cPntJMWB1E= github.com/vektah/gqlparser/v2 v2.5.2-0.20230422221642-25e09f9d292d/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= +github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= +github.com/wI2L/jsondiff v0.6.0/go.mod h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= From c8a82a5d968b7fc56e230f0a51fc68609babf39f Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Fri, 25 Oct 2024 15:39:59 +0200 Subject: [PATCH 02/28] feat: clean up development tool a bit --- cmd/tcl/devbox-mutating-webhook/main.go | 15 +- cmd/tcl/kubectl-testkube/devbox/README.md | 
1 - cmd/tcl/kubectl-testkube/devbox/agent.go | 179 --- cmd/tcl/kubectl-testkube/devbox/command.go | 1035 +++++++---------- .../kubectl-testkube/devbox/devutils/agent.go | 137 +++ .../devbox/{ => devutils}/binary.go | 87 +- .../devbox/devutils/certificates.go | 89 ++ .../devbox/{ => devutils}/cloud.go | 48 +- .../devbox/{ => devutils}/cluster.go | 39 +- .../devbox/devutils/crdsync.go | 246 ++++ .../devbox/{walker.go => devutils/find.go} | 20 +- .../devbox/devutils/forwarding.go | 173 +++ .../devbox/devutils/fswatcher.go | 106 ++ .../interceptor.go} | 214 ++-- .../devbox/{ => devutils}/namespace.go | 108 +- .../devbox/devutils/objectstorage.go | 193 +++ .../kubectl-testkube/devbox/devutils/pods.go | 228 ++++ cmd/tcl/kubectl-testkube/devbox/forward.go | 263 ----- .../kubectl-testkube/devbox/objectstorage.go | 205 ---- cmd/tcl/kubectl-testkube/devbox/print.go | 96 -- cmd/testworkflow-toolkit/env/client.go | 14 +- 21 files changed, 1871 insertions(+), 1625 deletions(-) delete mode 100644 cmd/tcl/kubectl-testkube/devbox/README.md delete mode 100644 cmd/tcl/kubectl-testkube/devbox/agent.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/agent.go rename cmd/tcl/kubectl-testkube/devbox/{ => devutils}/binary.go (55%) create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/certificates.go rename cmd/tcl/kubectl-testkube/devbox/{ => devutils}/cloud.go (73%) rename cmd/tcl/kubectl-testkube/devbox/{ => devutils}/cluster.go (56%) create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go rename cmd/tcl/kubectl-testkube/devbox/{walker.go => devutils/find.go} (60%) create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/fswatcher.go rename cmd/tcl/kubectl-testkube/devbox/{podinterceptor.go => devutils/interceptor.go} (53%) rename cmd/tcl/kubectl-testkube/devbox/{ => devutils}/namespace.go (51%) create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go create 
mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/pods.go delete mode 100644 cmd/tcl/kubectl-testkube/devbox/forward.go delete mode 100644 cmd/tcl/kubectl-testkube/devbox/objectstorage.go delete mode 100644 cmd/tcl/kubectl-testkube/devbox/print.go diff --git a/cmd/tcl/devbox-mutating-webhook/main.go b/cmd/tcl/devbox-mutating-webhook/main.go index 5f49cd0d44..e5dbf1617e 100644 --- a/cmd/tcl/devbox-mutating-webhook/main.go +++ b/cmd/tcl/devbox-mutating-webhook/main.go @@ -89,7 +89,7 @@ func main() { script := ` set -e /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp minio/devbox/binaries/testworkflow-init /.tk-devbox/init + /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init chmod 777 /.tk-devbox/init chmod +x /.tk-devbox/init ls -lah /.tk-devbox` @@ -97,8 +97,8 @@ func main() { script = ` set -e /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp minio/devbox/binaries/testworkflow-init /.tk-devbox/init - /usr/bin/mc cp minio/devbox/binaries/testworkflow-toolkit /.tk-devbox/toolkit + /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init + /usr/bin/mc cp --disable-multipart minio/devbox/bin/toolkit /.tk-devbox/toolkit chmod 777 /.tk-devbox/init chmod 777 /.tk-devbox/toolkit chmod +x /.tk-devbox/init @@ -107,10 +107,11 @@ func main() { } pod.Spec.InitContainers = append([]corev1.Container{{ - Name: "devbox-init", - Image: "minio/mc:latest", - Command: []string{"/bin/sh", "-c"}, - Args: []string{script}, + Name: "devbox-init", + Image: "minio/mc:latest", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sh", "-c"}, + Args: []string{script}, }}, pod.Spec.InitContainers...) 
// TODO: Handle it better, to not be ambiguous diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md deleted file mode 100644 index a83b3b4bbe..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/README.md +++ /dev/null @@ -1 +0,0 @@ -# Development Box \ No newline at end of file diff --git a/cmd/tcl/kubectl-testkube/devbox/agent.go b/cmd/tcl/kubectl-testkube/devbox/agent.go deleted file mode 100644 index 04b281a95b..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/agent.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2024 Testkube. -// -// Licensed as a Testkube Pro file under the Testkube Community -// License (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt - -package devbox - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - - "github.com/kubeshop/testkube/internal/common" - "github.com/kubeshop/testkube/pkg/cloud/client" -) - -type agentObj struct { - clientSet kubernetes.Interface - namespace string - cfg AgentConfig - pod *corev1.Pod - localPort int - localWebPort int -} - -type AgentConfig struct { - AgentImage string - ToolkitImage string - InitImage string -} - -func NewAgent(clientSet kubernetes.Interface, namespace string, cfg AgentConfig) *agentObj { - return &agentObj{ - clientSet: clientSet, - namespace: namespace, - cfg: cfg, - } -} - -func (r *agentObj) Deploy(env client.Environment, cloud *cloudObj) (err error) { - tlsInsecure := "false" - if cloud.AgentInsecure() { - tlsInsecure = "true" - } - r.pod, err = r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-agent", - Labels: map[string]string{ - "testkube.io/devbox": "agent", - }, 
- }, - Spec: corev1.PodSpec{ - TerminationGracePeriodSeconds: common.Ptr(int64(1)), - Volumes: []corev1.Volume{ - {Name: "tmp", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - {Name: "nats", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, - }, - ServiceAccountName: "devbox-account", - Containers: []corev1.Container{ - { - Name: "server", - Image: r.cfg.AgentImage, - Env: []corev1.EnvVar{ - {Name: "NATS_EMBEDDED", Value: "true"}, - {Name: "APISERVER_PORT", Value: "8088"}, - {Name: "APISERVER_FULLNAME", Value: "devbox-agent"}, - {Name: "DISABLE_TEST_TRIGGERS", Value: "true"}, - {Name: "DISABLE_WEBHOOKS", Value: "true"}, - {Name: "DISABLE_DEPRECATED_TESTS", Value: "true"}, - {Name: "TESTKUBE_ANALYTICS_ENABLED", Value: "false"}, - {Name: "TESTKUBE_NAMESPACE", Value: r.namespace}, - {Name: "JOB_SERVICE_ACCOUNT_NAME", Value: "devbox-account"}, - {Name: "TESTKUBE_ENABLE_IMAGE_DATA_PERSISTENT_CACHE", Value: "true"}, - {Name: "TESTKUBE_IMAGE_DATA_PERSISTENT_CACHE_KEY", Value: "testkube-image-cache"}, - {Name: "TESTKUBE_TW_TOOLKIT_IMAGE", Value: r.cfg.ToolkitImage}, - {Name: "TESTKUBE_TW_INIT_IMAGE", Value: r.cfg.InitImage}, - {Name: "TESTKUBE_PRO_API_KEY", Value: env.AgentToken}, - {Name: "TESTKUBE_PRO_ORG_ID", Value: env.OrganizationId}, - {Name: "TESTKUBE_PRO_ENV_ID", Value: env.Id}, - {Name: "TESTKUBE_PRO_URL", Value: cloud.AgentURI()}, - {Name: "TESTKUBE_PRO_TLS_INSECURE", Value: tlsInsecure}, - {Name: "TESTKUBE_PRO_TLS_SKIP_VERIFY", Value: "true"}, - }, - VolumeMounts: []corev1.VolumeMount{ - {Name: "tmp", MountPath: "/tmp"}, - {Name: "nats", MountPath: "/app/nats"}, - }, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/health", - Port: intstr.FromInt32(8088), - Scheme: corev1.URISchemeHTTP, - }, - }, - PeriodSeconds: 1, - }, - }, - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return err - } - - // Create the service - _, err = 
r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-agent", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: map[string]string{ - "testkube.io/devbox": "agent", - }, - Ports: []corev1.ServicePort{ - { - Name: "api", - Protocol: "TCP", - Port: 8088, - TargetPort: intstr.FromInt32(8088), - }, - }, - }, - }, metav1.CreateOptions{}) - - return -} - -func (r *agentObj) WaitForReady() (err error) { - for { - if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready { - return nil - } - time.Sleep(500 * time.Millisecond) - pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "testkube.io/devbox=agent", - }) - if err != nil { - return err - } - if len(pods.Items) == 0 { - return errors.New("pod not found") - } - r.pod = &pods.Items[0] - } -} - -func (r *agentObj) IP() string { - if r.pod == nil { - return "" - } - return r.pod.Status.PodIP -} - -func (r *agentObj) ClusterAddress() string { - if r.IP() == "" { - return "" - } - return fmt.Sprintf("devbox-agent:%d", 9000) -} - -func (r *agentObj) Debug() { - PrintHeader("Agent") - if r.ClusterAddress() != "" { - PrintItem("Cluster Address", r.ClusterAddress(), "") - } else { - PrintItem("Cluster Address", "unknown", "") - } -} diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 6d0fca3919..e921b01a14 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -10,117 +10,34 @@ package devbox import ( "context" - "errors" "fmt" - "io" - "io/fs" "os" "os/signal" "path/filepath" "strings" "sync" - "sync/atomic" "syscall" "time" - "github.com/fsnotify/fsnotify" - "github.com/minio/minio-go/v7" + "github.com/pkg/errors" "github.com/pterm/pterm" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" + 
"golang.org/x/sync/errgroup" - testworkflowsv1 "github.com/kubeshop/testkube-operator/api/testworkflows/v1" - common2 "github.com/kubeshop/testkube/cmd/kubectl-testkube/commands/common" "github.com/kubeshop/testkube/cmd/kubectl-testkube/config" - "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" - "github.com/kubeshop/testkube/internal/common" - "github.com/kubeshop/testkube/pkg/api/v1/client" + "github.com/kubeshop/testkube/cmd/tcl/kubectl-testkube/devbox/devutils" + "github.com/kubeshop/testkube/pkg/cloud/client" "github.com/kubeshop/testkube/pkg/mapper/testworkflows" "github.com/kubeshop/testkube/pkg/ui" ) -var ( - workflows []testworkflowsv1.TestWorkflow - templates []testworkflowsv1.TestWorkflowTemplate +const ( + InterceptorMainPath = "cmd/tcl/devbox-mutating-webhook/main.go" + AgentMainPath = "cmd/api-server/main.go" + ToolkitMainPath = "cmd/testworkflow-toolkit/main.go" + InitProcessMainPath = "cmd/testworkflow-init/main.go" ) -func load(filePaths []string) (workflows []testworkflowsv1.TestWorkflow, templates []testworkflowsv1.TestWorkflowTemplate) { - found := map[string]struct{}{} - for _, filePath := range filePaths { - err := filepath.Walk(filePath, func(path string, info fs.FileInfo, err error) error { - if info.IsDir() { - return nil - } - // Ignore already registered file path - if _, ok := found[path]; ok { - return nil - } - // Ignore non-YAML files - if !strings.HasSuffix(path, ".yml") && !strings.HasSuffix(path, ".yaml") { - return nil - } - - // Read the files - found[path] = struct{}{} - - // Parse the YAML file - file, err := os.Open(path) - if err != nil { - fmt.Printf(ui.Red("%s: failed to read: %s\n"), path, err.Error()) - return nil - } - - decoder := yaml.NewDecoder(file) - for { - var obj map[string]interface{} - err := decoder.Decode(&obj) - if errors.Is(err, io.EOF) { - file.Close() - break - } - if err != nil { - fmt.Printf(ui.Red("%s: failed to parse yaml: %s\n"), path, err.Error()) - break - } - - if obj["kind"] == 
nil || !(obj["kind"].(string) == "TestWorkflow" || obj["kind"].(string) == "TestWorkflowTemplate") { - continue - } - - if obj["kind"].(string) == "TestWorkflow" { - bytes, _ := yaml.Marshal(obj) - tw := testworkflowsv1.TestWorkflow{} - err := common.DeserializeCRD(&tw, bytes) - if tw.Name == "" { - continue - } - if err != nil { - fmt.Printf(ui.Red("%s: failed to deserialize TestWorkflow: %s\n"), path, err.Error()) - continue - } - workflows = append(workflows, tw) - } else if obj["kind"].(string) == "TestWorkflowTemplate" { - bytes, _ := yaml.Marshal(obj) - tw := testworkflowsv1.TestWorkflowTemplate{} - err := common.DeserializeCRD(&tw, bytes) - if tw.Name == "" { - continue - } - if err != nil { - fmt.Printf(ui.Red("%s: failed to deserialize TestWorkflowTemplate: %s\n"), path, err.Error()) - continue - } - templates = append(templates, tw) - } - } - file.Close() - return nil - }) - ui.ExitOnError(fmt.Sprintf("Reading '%s'", filePath), err) - } - return -} - func NewDevBoxCommand() *cobra.Command { var ( rawDevboxName string @@ -131,635 +48,471 @@ func NewDevBoxCommand() *cobra.Command { syncResources []string ) - ask := func(label string) bool { - if autoAccept { - return true - } - accept, _ := pterm.DefaultInteractiveConfirm.WithDefaultValue(true).Show(label) - return accept - } - cmd := &cobra.Command{ Use: "devbox", Hidden: true, Aliases: []string{"dev"}, Run: func(cmd *cobra.Command, args []string) { - devboxName := fmt.Sprintf("devbox-%s", rawDevboxName) + ctx, ctxCancel := context.WithCancel(context.Background()) + stopSignal := make(chan os.Signal, 1) + signal.Notify(stopSignal, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stopSignal + ctxCancel() + }() + + // Find repository root + rootDir := devutils.FindDirContaining(InterceptorMainPath, AgentMainPath, ToolkitMainPath, InitProcessMainPath) + if rootDir == "" { + ui.Fail(errors.New("testkube repository not found")) + } + + // Connect to cluster + cluster, err := devutils.NewCluster() + if err != 
nil { + ui.Fail(err) + } - // Load Testkube configuration + // Connect to Testkube cfg, err := config.Load() if err != nil { pterm.Error.Printfln("Failed to load config file: %s", err.Error()) return } - cloud, err := NewCloud(cfg.CloudContext) + cloud, err := devutils.NewCloud(cfg.CloudContext, cmd) if err != nil { - pterm.Error.Printfln("Failed to connect to Control Plane: %s", err.Error()) + pterm.Error.Printfln("Failed to connect to Cloud: %s", err.Error()) return } - // Print debug data for the Control Plane - cloud.Debug() - // Detect obsolete devbox environments if obsolete := cloud.ListObsolete(); len(obsolete) > 0 { - if ask(fmt.Sprintf("Should delete %d obsolete devbox environments?", len(obsolete))) { - count := 0 - for _, env := range obsolete { - // TODO: Delete namespaces too - err := cloud.DeleteEnvironment(env.Id) - if err != nil { - pterm.Error.Printfln("Failed to delete obsolete devbox environment (%s): %s", env.Name, err.Error()) - } else { - count++ - } + count := 0 + for _, env := range obsolete { + err := cloud.DeleteEnvironment(env.Id) + if err != nil { + fmt.Printf("Failed to delete obsolete devbox environment (%s): %s\n", env.Name, err.Error()) + continue } - pterm.Success.Printfln("Deleted %d/%d obsolete devbox environments", count, len(obsolete)) + cluster.Namespace(env.Name).Destroy() + count++ } - } - - // Verify if the User accepts this Kubernetes cluster - if !ask("Should continue with that organization?") { - return - } - - // Connect to Kubernetes cluster - cluster, err := NewCluster() - if err != nil { - pterm.Error.Printfln("Failed to connect to Kubernetes cluster: %s", err.Error()) - return - } - - // Print debug data for the cluster - cluster.Debug() - - // Verify if the User accepts this Kubernetes cluster - if !ask("Should continue with that cluster?") { - return - } - - // Print devbox information - PrintHeader("Development box") - PrintItem("Name", devboxName, "") - - interceptorBinarySource := 
findFile("cmd/tcl/devbox-mutating-webhook/main.go") - agentBinarySource := findFile("cmd/api-server/main.go") - toolkitBinarySource := findFile("cmd/testworkflow-toolkit/main.go") - initBinarySource := findFile("cmd/testworkflow-init/main.go") - - agentImageSource := findFile("build/api-server/Dockerfile") - toolkitImageSource := findFile("build/testworkflow-toolkit/Dockerfile") - initImageSource := findFile("build/testworkflow-init/Dockerfile") - - if interceptorBinarySource == "" { - pterm.Error.Printfln("Pod Interceptor: source not found in the current tree.") - return - } else { - PrintItem("Pod Interceptor", "build from source", filepath.Dir(interceptorBinarySource)) - } + fmt.Printf("Deleted %d/%d obsolete devbox environments\n", count, len(obsolete)) + } + + // Initialize bare cluster resources + namespace := cluster.Namespace(fmt.Sprintf("devbox-%s", rawDevboxName)) + objectStoragePod := namespace.Pod("devbox-storage") + interceptorPod := namespace.Pod("devbox-interceptor") + agentPod := namespace.Pod("devbox-agent") + + // Initialize binaries + interceptorBin := devutils.NewBinary(InterceptorMainPath, cluster.OperatingSystem(), cluster.Architecture()) + agentBin := devutils.NewBinary(AgentMainPath, cluster.OperatingSystem(), cluster.Architecture()) + toolkitBin := devutils.NewBinary(ToolkitMainPath, cluster.OperatingSystem(), cluster.Architecture()) + initProcessBin := devutils.NewBinary(InitProcessMainPath, cluster.OperatingSystem(), cluster.Architecture()) + + // Initialize wrappers over cluster resources + interceptor := devutils.NewInterceptor(interceptorPod, baseInitImage, baseToolkitImage, interceptorBin) + agent := devutils.NewAgent(agentPod, cloud, baseAgentImage, baseInitImage, baseToolkitImage) + objectStorage := devutils.NewObjectStorage(objectStoragePod) + + // Build initial binaries + g, _ := errgroup.WithContext(ctx) + fmt.Println("Building initial binaries...") + g.Go(func() error { + its := time.Now() + _, err := interceptorBin.Build(ctx) + 
if err != nil { + fmt.Printf("Interceptor: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Interceptor: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + g.Go(func() error { + its := time.Now() + _, err := agentBin.Build(ctx) + if err != nil { + fmt.Printf("Agent: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + g.Go(func() error { + its := time.Now() + _, err := toolkitBin.Build(ctx) + if err != nil { + fmt.Printf("Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + g.Go(func() error { + its := time.Now() + _, err := initProcessBin.Build(ctx) + if err != nil { + fmt.Printf("Init Process: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + err = g.Wait() - if agentBinarySource == "" || agentImageSource == "" { - pterm.Error.Printfln("Agent: source not found in the current tree.") - return - } else { - PrintItem("Agent", "build from source", filepath.Dir(agentBinarySource)) - } + var env *client.Environment - if initBinarySource == "" || initImageSource == "" { - pterm.Error.Printfln("Init Process: source not found in the current tree.") - return - } else { - PrintItem("Init Process", "build from source", filepath.Dir(initBinarySource)) - } - if toolkitBinarySource == "" || toolkitImageSource == "" { - pterm.Error.Printfln("Toolkit: source not found in the current tree.") - return - } else { - PrintItem("Toolkit", "build from source", filepath.Dir(toolkitBinarySource)) - } + // Cleanup + cleanupCh := make(chan struct{}) + var cleanupMu sync.Mutex + cleanup := func() { + cleanupMu.Lock() - // Create devbox environment - if !ask("Continue creating devbox environment?") { - return + fmt.Println("Deleting namespace...") + if err := namespace.Destroy(); err != nil { + fmt.Println("Failed to destroy namespace:", err.Error()) + } + if env != nil && env.Id != "" { + fmt.Println("Deleting environment...") + if err = cloud.DeleteEnvironment(env.Id); err != nil { + fmt.Println("Failed to delete environment:", err.Error()) + } + } } - - // Configure access objects - ns := cluster.Namespace(devboxName) - storage := cluster.ObjectStorage(devboxName) - interceptor := cluster.PodInterceptor(devboxName) - agent := NewAgent(cluster.ClientSet(), devboxName, AgentConfig{ - AgentImage: baseAgentImage, - ToolkitImage: baseToolkitImage, - InitImage: baseInitImage, - }) - - // Destroying - ctx, ctxCancel := context.WithCancel(context.Background()) - stopSignal := make(chan os.Signal, 1) - signal.Notify(stopSignal, syscall.SIGINT, 
syscall.SIGTERM) go func() { - <-stopSignal - ctxCancel() + <-ctx.Done() + cleanup() + close(cleanupCh) }() - // Prepare spinners - PrintActionHeader("Setting up...") - endSpinner := PrintSpinner( - "environment", "Creating environment", - "namespace", "Configuring cluster namespace", - "storage", "Deploying object storage", - "storageReady", "Waiting for object storage readiness", - "storageForwarding", "Forwarding object storage ports", - "interceptor", "Building Pod interceptor", - "interceptorDeploy", "Deploying Pod interceptor", - "interceptorReady", "Waiting for Pod interceptor readiness", - "interceptorEnable", "Enabling Pod interceptor", - "agent", "Deploying Agent", - "agentReady", "Waiting for Agent readiness", - ) - - // Create the environment in the organization - env, err := cloud.CreateEnvironment(devboxName) + fail := func(err error) { + fmt.Println("Error:", err.Error()) + cleanup() + os.Exit(1) + } + + // Create environment in the Cloud + fmt.Println("Creating environment in Cloud...") + env, err = cloud.CreateEnvironment(namespace.Name()) if err != nil { - endSpinner("environment", err) - return + fail(errors.Wrap(err, "failed to create Cloud environment")) } - endSpinner("environment") - // Create the namespace - if err = ns.Create(); err != nil { - endSpinner("namespace", err) - return + // Create namespace + fmt.Println("Creating namespace...") + if err = namespace.Create(); err != nil { + fail(errors.Wrap(err, "failed to create namespace")) } - endSpinner("namespace") // Deploy object storage - if err = storage.Deploy(); err != nil { - endSpinner("storage", err) - return + fmt.Println("Creating object storage...") + if err = objectStorage.Create(ctx); err != nil { + fail(errors.Wrap(err, "failed to create object storage")) } - endSpinner("storage") - - // Wait for object storage readiness - if err = storage.WaitForReady(); err != nil { - endSpinner("storageReady", err) - return + fmt.Println("Waiting for object storage readiness...") + if err 
= objectStorage.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to wait for readiness")) } - endSpinner("storageReady") - // Wait for object storage port forwarding - if err = storage.Forward(); err != nil { - endSpinner("storageForwarding", err) - return + // Deploying interceptor + fmt.Println("Deploying interceptor...") + if err = interceptor.Create(ctx); err != nil { + fail(errors.Wrap(err, "failed to create interceptor")) } - endSpinner("storageForwarding") - - // Building the Pod interceptor - interceptorBinaryFilePath := "/tmp/devbox-pod-interceptor" - interceptorBinary := NewBinary( - interceptorBinarySource, - interceptorBinaryFilePath, - cluster.OperatingSystem(), - cluster.Architecture(), - ) - if _, err = interceptorBinary.Build(ctx); err != nil { - endSpinner("interceptor", err) - return - } - endSpinner("interceptor") - - // Deploying the Pod interceptor - if err = interceptor.Deploy(interceptorBinaryFilePath, baseInitImage, baseToolkitImage); err != nil { - endSpinner("interceptorDeploy", err) - return + fmt.Println("Waiting for interceptor readiness...") + if err = interceptor.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to create interceptor")) } - endSpinner("interceptorDeploy") - // Wait for Pod interceptor readiness - if err = interceptor.WaitForReady(); err != nil { - endSpinner("interceptorReady", err) - return - } - endSpinner("interceptorReady") + // Uploading binaries + g, _ = errgroup.WithContext(ctx) + fmt.Println("Uploading binaries...") + g.Go(func() error { + its := time.Now() + file, err := os.Open(agentBin.Path()) + if err != nil { + return err + } + defer file.Close() + err = objectStorage.Upload(ctx, "bin/testkube-api-server", file, agentBin.Hash()) + if err != nil { + fmt.Printf("Agent: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + g.Go(func() error { + its := time.Now() + file, err := os.Open(toolkitBin.Path()) + if err != nil { + return err + } + defer file.Close() + err = objectStorage.Upload(ctx, "bin/toolkit", file, toolkitBin.Hash()) + if err != nil { + fmt.Printf("Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + g.Go(func() error { + its := time.Now() + file, err := os.Open(initProcessBin.Path()) + if err != nil { + return err + } + defer file.Close() + err = objectStorage.Upload(ctx, "bin/init", file, initProcessBin.Hash()) + if err != nil { + fmt.Printf("Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + return err + }) + err = g.Wait() - // Enable Pod interceptor - if err = interceptor.Enable(); err != nil { - endSpinner("interceptorEnable", err) - return + // Enabling Pod interceptor + fmt.Println("Enabling interceptor...") + if err = interceptor.Enable(ctx); err != nil { + fail(errors.Wrap(err, "failed to enable interceptor")) } - endSpinner("interceptorEnable") - // Deploy Agent - if err = agent.Deploy(*env, cloud); err != nil { - endSpinner("agent", err) - return + // Deploying agent + fmt.Println("Deploying agent...") + if err = agent.Create(ctx, env); err != nil { + fail(errors.Wrap(err, "failed to create agent")) } - endSpinner("agent") - - // Wait for Agent readiness - if err = agent.WaitForReady(); err != nil { - endSpinner("agentReady", err) - return + fmt.Println("Waiting for agent readiness...") + if err = agent.WaitForReady(ctx); err != nil { + 
fail(errors.Wrap(err, "failed to create agent")) } - endSpinner("agentReady") - - PrintHeader("Environment") - PrintItem("Environment ID", env.Id, "") - PrintItem("Agent Token", env.AgentToken, "") - PrintItem("Dashboard", cloud.DashboardUrl(env.Id, ""), "") - - //imageRegistry.Debug() - storage.Debug() - agent.Debug() - - // CONNECTING TO THE CLOUD - agentBinaryFilePath := filepath.Join(filepath.Dir(agentImageSource), "testkube-api-server") - agentBinary := NewBinary( - agentBinarySource, - agentBinaryFilePath, - cluster.OperatingSystem(), - cluster.Architecture(), - ) - initBinaryFilePath := filepath.Join(filepath.Dir(initImageSource), "testworkflow-init") - initBinary := NewBinary( - initBinarySource, - initBinaryFilePath, - cluster.OperatingSystem(), - cluster.Architecture(), - ) - toolkitBinaryFilePath := filepath.Join(filepath.Dir(toolkitImageSource), "testworkflow-toolkit") - toolkitBinary := NewBinary( - toolkitBinarySource, - toolkitBinaryFilePath, - cluster.OperatingSystem(), - cluster.Architecture(), - ) - - storageClient, err := storage.Connect() + fmt.Println("Creating file system watcher...") + goWatcher, err := devutils.NewFsWatcher(rootDir) if err != nil { - ui.Fail(fmt.Errorf("failed to connect to the Object Storage: %s", err)) + fail(errors.Wrap(err, "failed to watch Testkube repository")) } - storageClient.CreateBucket(ctx, "devbox") - - buildImages := func(ctx context.Context) (bool, error) { - fmt.Println("Building...") - var errsMu sync.Mutex - errs := make([]error, 0) - agentChanged := false - initChanged := false - toolkitChanged := false - ts := time.Now() - var wg sync.WaitGroup - wg.Add(3) + + if len(syncResources) > 0 { + fmt.Println("Loading Test Workflows and Templates...") + sync := devutils.NewCRDSync() + + // Initial run + for _, path := range syncResources { + _ = sync.Load(path) + } + fmt.Printf("Started synchronising %d Test Workflows and %d Templates...\n", sync.WorkflowsCount(), sync.TemplatesCount()) + + // Propagate changes 
from FS to CRDSync + yamlWatcher, err := devutils.NewFsWatcher(syncResources...) + if err != nil { + fail(errors.Wrap(err, "failed to watch for YAML changes")) + } go func() { - prevHash := agentBinary.Hash() - hash, err := agentBinary.Build(ctx) - if err != nil { - errsMu.Lock() - errs = append(errs, err) - errsMu.Unlock() - } else { - if prevHash != hash { - agentChanged = true + for { + if ctx.Err() != nil { + break } - } - wg.Done() - }() - go func() { - prevHash := initBinary.Hash() - hash, err := initBinary.Build(ctx) - if err != nil { - errsMu.Lock() - errs = append(errs, err) - errsMu.Unlock() - } else { - if prevHash != hash { - initChanged = true + file, err := yamlWatcher.Next(ctx) + if err == nil { + _ = sync.Load(file) } } - wg.Done() }() + + // Propagate changes from CRDSync to Cloud go func() { - prevHash := toolkitBinary.Hash() - hash, err := toolkitBinary.Build(ctx) - if err != nil { - errsMu.Lock() - errs = append(errs, err) - errsMu.Unlock() - } else { - if prevHash != hash { - toolkitChanged = true + parallel := make(chan struct{}, 30) + for { + if ctx.Err() != nil { + break + } + update, err := sync.Next(ctx) + if err != nil { + continue + } + parallel <- struct{}{} + switch update.Op { + case devutils.CRDSyncUpdateOpCreate: + client, err := cloud.Client(env.Id) + if err != nil { + fail(errors.Wrap(err, "failed to create cloud client")) + } + if update.Template != nil { + update.Template.Spec.Events = nil // ignore Cronjobs + _, err := client.CreateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) + if err != nil { + fmt.Printf("Failed to create Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + } + } else { + update.Workflow.Spec.Events = nil // ignore Cronjobs + _, err := client.CreateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) + if err != nil { + fmt.Printf("Failed to create Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + } + } + case devutils.CRDSyncUpdateOpUpdate: 
+ client, err := cloud.Client(env.Id) + if err != nil { + fail(errors.Wrap(err, "failed to create cloud client")) + } + if update.Template != nil { + update.Template.Spec.Events = nil // ignore Cronjobs + _, err := client.UpdateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) + if err != nil { + fmt.Printf("Failed to update Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + } + } else { + update.Workflow.Spec.Events = nil + _, err := client.UpdateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) + if err != nil { + fmt.Printf("Failed to update Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + } + } + case devutils.CRDSyncUpdateOpDelete: + client, err := cloud.Client(env.Id) + if err != nil { + fail(errors.Wrap(err, "failed to create cloud client")) + } + if update.Template != nil { + err := client.DeleteTestWorkflowTemplate(update.Template.Name) + if err != nil { + fmt.Printf("Failed to delete Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + } + } else { + err := client.DeleteTestWorkflow(update.Workflow.Name) + if err != nil { + fmt.Printf("Failed to delete Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + } + } } + <-parallel } - wg.Done() }() - wg.Wait() - - if errors.Is(ctx.Err(), context.Canceled) { - return false, context.Canceled - } + } - fmt.Println("Built binaries in", time.Since(ts)) + fmt.Println("Waiting for file changes...") - if len(errs) == 0 && ctx.Err() == nil && (initChanged || toolkitChanged || agentChanged) { - fmt.Println("Packing...") - ts = time.Now() - count := 0 - if initChanged { - count++ - } - if toolkitChanged { - count++ - } - if agentChanged { - count++ + rebuild := func(ctx context.Context) { + g, _ := errgroup.WithContext(ctx) + fmt.Println("Rebuilding binaries...") + g.Go(func() error { + its := time.Now() + _, err := agentBin.Build(ctx) + if err != nil { + fmt.Printf("Agent: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } + fmt.Printf("Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - tarFile, err := os.Create("/tmp/devbox-binaries.tar.gz") + its = time.Now() + file, err := os.Open(agentBin.Path()) if err != nil { - return false, err - } - tarStream := artifacts.NewTarStream() - var mu sync.Mutex - go func() { - mu.Lock() - io.Copy(tarFile, tarStream) - mu.Unlock() - }() - - if initChanged { - file, err := os.Open(initBinaryFilePath) - if err != nil { - return false, err - } - fileStat, err := file.Stat() - if err != nil { - file.Close() - return false, err - } - tarStream.Add("testworkflow-init", file, fileStat) - file.Close() + return err } - if toolkitChanged { - file, err := os.Open(toolkitBinaryFilePath) - if err != nil { - return false, err - } - fileStat, err := file.Stat() - if err != nil { - file.Close() - return false, err - } - tarStream.Add("testworkflow-toolkit", file, fileStat) - file.Close() + defer file.Close() + err = objectStorage.Upload(ctx, "bin/testkube-api-server", file, agentBin.Hash()) + if err != nil { + fmt.Printf("Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } - if agentChanged { - file, err := os.Open(agentBinaryFilePath) - if err != nil { - return false, err - } - fileStat, err := file.Stat() - if err != nil { - file.Close() - return false, err + fmt.Printf("Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + + // TODO: Restart only if it has changes + if time.Since(its).Truncate(time.Millisecond).String() != "0s" { + err := agentPod.Restart(ctx) + if err == nil { + fmt.Printf("Agent: restarted. 
Waiting for readiness...\n") + _ = agentPod.RefreshData(ctx) + err = agentPod.WaitForReady(ctx) + if ctx.Err() != nil { + return nil + } + if err == nil { + fmt.Printf("Agent: ready again\n") + } else { + fail(errors.Wrap(err, "failed to wait for agent pod readiness")) + } + } else { + fmt.Printf("Agent: restart failed: %s\n", err.Error()) } - tarStream.Add("testkube-api-server", file, fileStat) - file.Close() } - - tarStream.Close() - mu.Lock() - mu.Unlock() - - fmt.Printf("Packed %d binaries in %s\n", count, time.Since(ts)) - ts = time.Now() - - if ctx.Err() != nil { - return false, nil + return nil + }) + g.Go(func() error { + its := time.Now() + _, err := toolkitBin.Build(ctx) + if err != nil { + fmt.Printf("Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } + fmt.Printf("Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - fmt.Println("Uploading...") - tarFile, err = os.Open("/tmp/devbox-binaries.tar.gz") + its = time.Now() + file, err := os.Open(toolkitBin.Path()) if err != nil { - return false, err + return err } - defer tarFile.Close() - tarFileStat, err := tarFile.Stat() + defer file.Close() + err = objectStorage.Upload(ctx, "bin/toolkit", file, toolkitBin.Hash()) if err != nil { - return false, err + fmt.Printf("Toolkit: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } - err = storageClient.SaveFileDirect(ctx, "binaries", "binaries.tar.gz", tarFile, tarFileStat.Size(), minio.PutObjectOptions{ - DisableMultipart: true, - ContentEncoding: "gzip", - ContentType: "application/gzip", - UserMetadata: map[string]string{ - "X-Amz-Meta-Snowball-Auto-Extract": "true", - "X-Amz-Meta-Minio-Snowball-Prefix": "binaries", - }, - }) - os.Remove("/tmp/devbox-binaries.tar.gz") - - if count > 0 && ctx.Err() == nil { - fmt.Printf("Uploaded %d binaries in %s\n", count, time.Since(ts)) + fmt.Printf("Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + return nil + }) + g.Go(func() error { + its := time.Now() + _, err := initProcessBin.Build(ctx) + if err != nil { + fmt.Printf("Init Process: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } - } - - return initChanged || agentChanged || toolkitChanged, errors.Join(errs...) 
- } + fmt.Printf("Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - buildImages(ctx) - - // Load Test Workflows from file system - if len(syncResources) > 0 { - workflows, templates = load(syncResources) - fmt.Printf("found %d Test Workflows in file system (and %d templates)\n", len(workflows), len(templates)) - } - - // Inject Test Workflows from file system - common2.GetClient(cmd) // refresh token - cloudClient, err := client.GetClient(client.ClientCloud, client.Options{ - Insecure: cloud.AgentInsecure(), - ApiUri: cloud.ApiURI(), - CloudApiKey: cloud.ApiKey(), - CloudOrganization: env.OrganizationId, - CloudEnvironment: env.Id, - CloudApiPathPrefix: fmt.Sprintf("/organizations/%s/environments/%s/agent", env.OrganizationId, env.Id), - }) - if err != nil { - ui.Warn(fmt.Sprintf("failed to connect to cloud: %s", err.Error())) - } else { - var errs atomic.Int32 - queue := make(chan struct{}, 30) - wg := sync.WaitGroup{} - wg.Add(len(templates)) - for _, w := range templates { - go func(w testworkflowsv1.TestWorkflowTemplate) { - queue <- struct{}{} - _, err = cloudClient.CreateTestWorkflowTemplate(testworkflows.MapTestWorkflowTemplateKubeToAPI(w)) - if err != nil { - errs.Add(1) - fmt.Printf("failed to create test workflow template: %s: %s\n", w.Name, err.Error()) - } - <-queue - wg.Done() - }(w) - } - wg.Wait() - fmt.Printf("Uploaded %d/%d templates.\n", len(templates)-int(errs.Load()), len(templates)) - errs.Swap(0) - wg = sync.WaitGroup{} - wg.Add(len(workflows)) - for _, w := range workflows { - go func(w testworkflowsv1.TestWorkflow) { - queue <- struct{}{} - _, err = cloudClient.CreateTestWorkflow(testworkflows.MapTestWorkflowKubeToAPI(w)) - if err != nil { - errs.Add(1) - fmt.Printf("failed to create test workflow: %s: %s\n", w.Name, err.Error()) - } - <-queue - wg.Done() - }(w) - } - wg.Wait() - fmt.Printf("Uploaded %d/%d workflows.\n", len(workflows)-int(errs.Load()), len(workflows)) - } - - fsWatcher, err := 
fsnotify.NewWatcher() - if err != nil { - ui.Fail(err) - } - - var watchFsRecursive func(dirPath string) error - watchFsRecursive = func(dirPath string) error { - if err := fsWatcher.Add(dirPath); err != nil { - return err - } - return filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error { - if err != nil || !d.IsDir() { - return nil - } - if filepath.Base(path)[0] == '.' { - // Ignore dot-files - return nil + its = time.Now() + file, err := os.Open(initProcessBin.Path()) + if err != nil { + return err } - if path == dirPath { - return nil + defer file.Close() + err = objectStorage.Upload(ctx, "bin/init", file, initProcessBin.Hash()) + if err != nil { + fmt.Printf("Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + return err } - return watchFsRecursive(path) + fmt.Printf("Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + return nil }) + err = g.Wait() } - go func() { - triggerCtx, cancelTrigger := context.WithCancel(ctx) - defer cancelTrigger() - trigger := func(triggerCtx context.Context) { - select { - case <-triggerCtx.Done(): - case <-time.After(300 * time.Millisecond): - changed, err := buildImages(triggerCtx) - if ctx.Err() != nil { - return - } - if err == nil { - if changed { - fmt.Println("Build finished. Changes detected") - } else { - fmt.Println("Build finished. No changes detected") - } - } else { - fmt.Println("Build finished. 
Error:", err.Error()) - } - } + + rebuildCtx, rebuildCtxCancel := context.WithCancel(ctx) + for { + if ctx.Err() != nil { + break } - for { - select { - case event, ok := <-fsWatcher.Events: - if !ok { - return - } - fileinfo, err := os.Stat(event.Name) - if err != nil { - continue - } - if fileinfo.IsDir() { - if event.Has(fsnotify.Create) { - if err = watchFsRecursive(event.Name); err != nil { - fmt.Println("failed to watch", event.Name) - } - } - continue - } - if !strings.HasSuffix(event.Name, ".go") { - continue - } - if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) && !event.Has(fsnotify.Remove) { - continue - } - fmt.Println("File changed:", event.Name) - - cancelTrigger() - triggerCtx, cancelTrigger = context.WithCancel(ctx) - go trigger(triggerCtx) - case err, ok := <-fsWatcher.Errors: - if !ok { - return - } - fmt.Println("Filesystem watcher error:", err.Error()) + file, err := goWatcher.Next(ctx) + if err != nil { + fmt.Println("file system watcher error:", err.Error()) + } else if strings.HasSuffix(file, ".go") { + relPath, _ := filepath.Rel(rootDir, file) + if relPath == "" { + relPath = file } + fmt.Printf("%s changed\n", relPath) + rebuildCtxCancel() + rebuildCtx, rebuildCtxCancel = context.WithCancel(ctx) + go rebuild(rebuildCtx) } - }() - err = watchFsRecursive(filepath.Clean(toolkitImageSource + "/../../..")) - if err != nil { - ui.Fail(err) } - defer fsWatcher.Close() - fmt.Println("Watching", filepath.Clean(toolkitImageSource+"/../../.."), "for changes") - - <-ctx.Done() - - // DESTROYING - - PrintActionHeader("Cleaning up...") - endSpinner = PrintSpinner( - "namespace", "Deleting cluster namespace", - "environment", "Deleting environment", - "interceptor", "Deleting interceptor", - ) - - wg := sync.WaitGroup{} - wg.Add(3) - - // Destroy the namespace - go func() { - defer wg.Done() - if err = ns.Destroy(); err != nil { - endSpinner("namespace", err) - } else { - endSpinner("namespace") - } - }() - - // Destroy the environment - go 
func() { - defer wg.Done() - if err = cloud.DeleteEnvironment(env.Id); err != nil { - endSpinner("environment", err) - } else { - endSpinner("environment") - } - }() - - // Destroy the interceptor - go func() { - defer wg.Done() - if err = interceptor.Disable(); err != nil { - endSpinner("interceptor", err) - } else { - endSpinner("interceptor") - } - }() - wg.Wait() + <-cleanupCh }, } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go new file mode 100644 index 0000000000..22c71f9f97 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go @@ -0,0 +1,137 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/cloud/client" +) + +type Agent struct { + pod *PodObject + localPort int + cloud *cloudObj + agentImage string + initProcessImage string + toolkitImage string +} + +func NewAgent(pod *PodObject, cloud *cloudObj, agentImage, initProcessImage, toolkitImage string) *Agent { + return &Agent{ + pod: pod, + cloud: cloud, + agentImage: agentImage, + initProcessImage: initProcessImage, + toolkitImage: toolkitImage, + } +} + +func (r *Agent) Create(ctx context.Context, env *client.Environment) error { + tlsInsecure := "false" + if r.cloud.AgentInsecure() { + tlsInsecure = "true" + } + err := r.pod.Create(ctx, &corev1.Pod{ + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: common.Ptr(int64(1)), + Volumes: []corev1.Volume{ + {Name: "tmp", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "nats", 
VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "devbox", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + ServiceAccountName: "devbox-account", + InitContainers: []corev1.Container{{ + Name: "devbox-init", + Image: "minio/mc:latest", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sh", "-c"}, + Args: []string{` + set -e + /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" + /usr/bin/mc cp --disable-multipart minio/devbox/bin/testkube-api-server /.tk-devbox/testkube-api-server + chmod 777 /.tk-devbox/testkube-api-server + chmod +x /.tk-devbox/testkube-api-server + ls -lah /.tk-devbox`}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "devbox", MountPath: "/.tk-devbox"}, + }, + }}, + Containers: []corev1.Container{ + { + Name: "server", + Image: r.agentImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/.tk-devbox/testkube-api-server"}, + Env: []corev1.EnvVar{ + {Name: "NATS_EMBEDDED", Value: "true"}, + {Name: "APISERVER_PORT", Value: "8088"}, + {Name: "APISERVER_FULLNAME", Value: "devbox-agent"}, + {Name: "DISABLE_TEST_TRIGGERS", Value: "true"}, + {Name: "DISABLE_WEBHOOKS", Value: "true"}, + {Name: "DISABLE_DEPRECATED_TESTS", Value: "true"}, + {Name: "TESTKUBE_ANALYTICS_ENABLED", Value: "false"}, + {Name: "TESTKUBE_NAMESPACE", Value: r.pod.Namespace()}, + {Name: "JOB_SERVICE_ACCOUNT_NAME", Value: "devbox-account"}, + {Name: "TESTKUBE_ENABLE_IMAGE_DATA_PERSISTENT_CACHE", Value: "true"}, + {Name: "TESTKUBE_IMAGE_DATA_PERSISTENT_CACHE_KEY", Value: "testkube-image-cache"}, + {Name: "TESTKUBE_TW_TOOLKIT_IMAGE", Value: r.toolkitImage}, + {Name: "TESTKUBE_TW_INIT_IMAGE", Value: r.initProcessImage}, + {Name: "TESTKUBE_PRO_API_KEY", Value: env.AgentToken}, + {Name: "TESTKUBE_PRO_ORG_ID", Value: env.OrganizationId}, + {Name: "TESTKUBE_PRO_ENV_ID", Value: env.Id}, + {Name: "TESTKUBE_PRO_URL", Value: r.cloud.AgentURI()}, + 
{Name: "TESTKUBE_PRO_TLS_INSECURE", Value: tlsInsecure}, + {Name: "TESTKUBE_PRO_TLS_SKIP_VERIFY", Value: "true"}, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "tmp", MountPath: "/tmp"}, + {Name: "nats", MountPath: "/app/nats"}, + {Name: "devbox", MountPath: "/.tk-devbox"}, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt32(8088), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }) + if err != nil { + return err + } + err = r.pod.WaitForContainerStarted(ctx) + if err != nil { + return err + } + return r.pod.CreateService(ctx, corev1.ServicePort{ + Name: "api", + Protocol: "TCP", + Port: 8088, + TargetPort: intstr.FromInt32(8088), + }) +} + +func (r *Agent) WaitForReady(ctx context.Context) error { + return r.pod.WaitForReady(ctx) +} + +func (r *Agent) Restart(ctx context.Context) error { + return r.pod.Restart(ctx) +} diff --git a/cmd/tcl/kubectl-testkube/devbox/binary.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go similarity index 55% rename from cmd/tcl/kubectl-testkube/devbox/binary.go rename to cmd/tcl/kubectl-testkube/devbox/devutils/binary.go index e27ed929d3..cd49245b61 100644 --- a/cmd/tcl/kubectl-testkube/devbox/binary.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go @@ -6,7 +6,7 @@ // // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt -package devbox +package devutils import ( "context" @@ -18,37 +18,60 @@ import ( "path/filepath" "strings" "sync" + + "github.com/kubeshop/testkube/pkg/tmp" ) -type binaryObj struct { - lastHash string - outputPath string - mainFilePath string - os string - arch string - mu sync.Mutex +type Binary struct { + mainPath string + outputPath string + operatingSystem string + procArchitecture string + + hash string + buildMu sync.RWMutex } -func NewBinary(mainFilePath, outputPath, os, arch string) *binaryObj { - return &binaryObj{ - mainFilePath: 
mainFilePath, - outputPath: outputPath, - os: os, - arch: arch, +func NewBinary(mainPath, operatingSystem, procArchitecture string) *Binary { + return &Binary{ + mainPath: mainPath, + outputPath: tmp.Name(), + operatingSystem: operatingSystem, + procArchitecture: procArchitecture, } } -func (b *binaryObj) Hash() string { - return b.lastHash +func (b *Binary) updateHash() error { + f, err := os.Open(b.outputPath) + if err != nil { + return fmt.Errorf("failed to get hash: reading binary: %s", err.Error()) + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return fmt.Errorf("failed to get hash: %s", err.Error()) + } + + b.hash = fmt.Sprintf("%x", h.Sum(nil)) + return nil +} + +func (b *Binary) Hash() string { + b.buildMu.RLock() + defer b.buildMu.RUnlock() + return b.hash } -func (b *binaryObj) Path() string { +func (b *Binary) Path() string { + b.buildMu.RLock() + defer b.buildMu.RUnlock() return b.outputPath } -func (b *binaryObj) Build(ctx context.Context) (hash string, err error) { - b.mu.Lock() - defer b.mu.Unlock() +func (b *Binary) Build(ctx context.Context) (string, error) { + b.buildMu.Lock() + defer b.buildMu.Unlock() cmd := exec.Command( "go", "build", @@ -58,15 +81,15 @@ func (b *binaryObj) Build(ctx context.Context) (hash string, err error) { "-X github.com/kubeshop/testkube/internal/app/api/v1.SlackBotClientSecret=", "-X github.com/kubeshop/testkube/pkg/telemetry.TestkubeMeasurementID=", "-X github.com/kubeshop/testkube/pkg/telemetry.TestkubeMeasurementSecret=", - "-X github.com/kubeshop/testkube/internal/pkg/api.Version=dev", + "-X github.com/kubeshop/testkube/internal/pkg/api.Version=devbox", "-X github.com/kubeshop/testkube/internal/pkg/api.Commit=000000000", }, " ")), "./main.go", ) - cmd.Dir = filepath.Dir(b.mainFilePath) + cmd.Dir = filepath.Dir(b.mainPath) cmd.Env = append(os.Environ(), - fmt.Sprintf("GOOS=%s", b.os), - fmt.Sprintf("GOARCH=%s", b.arch), + fmt.Sprintf("GOOS=%s", b.operatingSystem), + 
fmt.Sprintf("GOARCH=%s", b.procArchitecture), ) r, w := io.Pipe() cmd.Stdout = w @@ -86,13 +109,16 @@ func (b *binaryObj) Build(ctx context.Context) (hash string, err error) { } }() - if err = cmd.Run(); err != nil { - w.Close() + err := cmd.Run() + w.Close() + if err != nil { bufMu.Lock() defer bufMu.Unlock() + if ctx.Err() != nil { + return "", ctx.Err() + } return "", fmt.Errorf("failed to build: %s: %s", err.Error(), string(buf)) } - w.Close() f, err := os.Open(b.outputPath) if err != nil { @@ -105,6 +131,9 @@ func (b *binaryObj) Build(ctx context.Context) (hash string, err error) { return "", fmt.Errorf("failed to get hash: %s", err.Error()) } - b.lastHash = fmt.Sprintf("%x", h.Sum(nil)) - return b.lastHash, nil + err = b.updateHash() + if err != nil { + return "", err + } + return b.hash, err } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/certificates.go b/cmd/tcl/kubectl-testkube/devbox/devutils/certificates.go new file mode 100644 index 0000000000..7d63b11da1 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/certificates.go @@ -0,0 +1,89 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" +) + +type CertificateSet struct { + CaPEM []byte + CrtPEM []byte + KeyPEM []byte +} + +func CreateCertificate(cert x509.Certificate) (result CertificateSet, err error) { + // Build CA + ca := &x509.Certificate{ + SerialNumber: big.NewInt(11111), + Subject: pkix.Name{ + Organization: []string{"Kubeshop"}, + Country: []string{"US"}, + Province: []string{""}, + Locality: []string{"Wilmington"}, + StreetAddress: []string{"Orange St"}, + PostalCode: []string{"19801"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(10, 0, 0), + IsCA: true, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + } + caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return result, err + } + caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) + if err != nil { + return result, err + } + caPEM := new(bytes.Buffer) + pem.Encode(caPEM, &pem.Block{Type: "CERTIFICATE", Bytes: caBytes}) + caPrivKeyPEM := new(bytes.Buffer) + pem.Encode(caPrivKeyPEM, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey)}) + + // Build the direct certificate + cert.NotBefore = ca.NotBefore + cert.NotAfter = ca.NotAfter + cert.SerialNumber = big.NewInt(11111) + cert.Subject = ca.Subject + cert.SubjectKeyId = []byte{1, 2, 3, 4, 6} + cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} + cert.KeyUsage = x509.KeyUsageDigitalSignature + + certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) + if err != nil { + return result, err + } + certBytes, err := 
x509.CreateCertificate(rand.Reader, &cert, ca, &certPrivKey.PublicKey, caPrivKey) + if err != nil { + return result, err + } + certPEM := new(bytes.Buffer) + pem.Encode(certPEM, &pem.Block{ + Type: "CERTIFICATE", + Bytes: certBytes, + }) + certPrivKeyPEM := new(bytes.Buffer) + pem.Encode(certPrivKeyPEM, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey)}) + + result.CaPEM = caPEM.Bytes() + result.CrtPEM = certPEM.Bytes() + result.KeyPEM = certPrivKeyPEM.Bytes() + return result, nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/cloud.go b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go similarity index 73% rename from cmd/tcl/kubectl-testkube/devbox/cloud.go rename to cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go index 815b472888..296d8183a9 100644 --- a/cmd/tcl/kubectl-testkube/devbox/cloud.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go @@ -6,15 +6,21 @@ // // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt -package devbox +package devutils import ( "errors" "fmt" "regexp" "strings" + "sync" + "time" + "github.com/spf13/cobra" + + common2 "github.com/kubeshop/testkube/cmd/kubectl-testkube/commands/common" "github.com/kubeshop/testkube/cmd/kubectl-testkube/config" + client2 "github.com/kubeshop/testkube/pkg/api/v1/client" "github.com/kubeshop/testkube/pkg/cloud/client" ) @@ -22,9 +28,15 @@ type cloudObj struct { cfg config.CloudContext envClient *client.EnvironmentsClient list []client.Environment + + clientMu sync.Mutex + client client2.Client + clientTs time.Time + + cmd *cobra.Command } -func NewCloud(cfg config.CloudContext) (*cloudObj, error) { +func NewCloud(cfg config.CloudContext, cmd *cobra.Command) (*cloudObj, error) { if cfg.ApiKey == "" || cfg.OrganizationId == "" || cfg.OrganizationName == "" { return nil, errors.New("login to the organization first") } @@ -47,6 +59,7 @@ func NewCloud(cfg config.CloudContext) (*cloudObj, error) { obj := &cloudObj{ cfg: cfg, envClient: envClient, + cmd: 
cmd, } err := obj.UpdateList() @@ -85,6 +98,29 @@ func (c *cloudObj) UpdateList() error { return nil } +func (c *cloudObj) Client(environmentId string) (client2.Client, error) { + c.clientMu.Lock() + defer c.clientMu.Unlock() + + if c.client == nil || c.clientTs.Add(5*time.Minute).Before(time.Now()) { + common2.GetClient(c.cmd) // refresh token + var err error + c.client, err = client2.GetClient(client2.ClientCloud, client2.Options{ + Insecure: c.AgentInsecure(), + ApiUri: c.ApiURI(), + CloudApiKey: c.ApiKey(), + CloudOrganization: c.cfg.OrganizationId, + CloudEnvironment: environmentId, + CloudApiPathPrefix: fmt.Sprintf("/organizations/%s/environments/%s/agent", c.cfg.OrganizationId, environmentId), + }) + if err != nil { + return nil, err + } + c.clientTs = time.Now() + } + return c.client, nil +} + func (c *cloudObj) AgentURI() string { return c.cfg.AgentUri } @@ -125,11 +161,3 @@ func (c *cloudObj) CreateEnvironment(name string) (*client.Environment, error) { func (c *cloudObj) DeleteEnvironment(id string) error { return c.envClient.Delete(id) } - -func (c *cloudObj) Debug() { - PrintHeader("Control Plane") - PrintItem("Organization", c.cfg.OrganizationName, c.cfg.OrganizationId) - PrintItem("API URL", c.cfg.ApiUri, "") - PrintItem("UI URL", c.cfg.UiUri, "") - PrintItem("Agent Server", c.cfg.AgentUri, "") -} diff --git a/cmd/tcl/kubectl-testkube/devbox/cluster.go b/cmd/tcl/kubectl-testkube/devbox/devutils/cluster.go similarity index 56% rename from cmd/tcl/kubectl-testkube/devbox/cluster.go rename to cmd/tcl/kubectl-testkube/devbox/devutils/cluster.go index b58f0186d2..19450acd94 100644 --- a/cmd/tcl/kubectl-testkube/devbox/cluster.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/cluster.go @@ -6,7 +6,7 @@ // // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt -package devbox +package devutils import ( "strings" @@ -19,13 +19,13 @@ import ( "github.com/kubeshop/testkube/pkg/k8sclient" ) -type clusterObj struct { +type ClusterObject struct { cfg 
*rest.Config clientSet *kubernetes.Clientset versionInfo *version.Info } -func NewCluster() (*clusterObj, error) { +func NewCluster() (*ClusterObject, error) { config, err := rest.InClusterConfig() if err != nil { config, err = k8sclient.GetK8sClientConfig() @@ -42,48 +42,33 @@ func NewCluster() (*clusterObj, error) { return nil, errors.Wrap(err, "failed to get Kubernetes cluster details") } - return &clusterObj{ + return &ClusterObject{ clientSet: clientSet, versionInfo: info, cfg: config, }, nil } -func (c *clusterObj) Debug() { - PrintHeader("Cluster") - PrintItem("Address", c.cfg.Host, "") - PrintItem("Platform", c.versionInfo.Platform, "") - PrintItem("Version", c.versionInfo.GitVersion, "") -} - -func (c *clusterObj) ClientSet() *kubernetes.Clientset { +func (c *ClusterObject) ClientSet() *kubernetes.Clientset { return c.clientSet } -func (c *clusterObj) Config() *rest.Config { +func (c *ClusterObject) Config() *rest.Config { return c.cfg } -func (c *clusterObj) Namespace(name string) *namespaceObj { - return NewNamespace(c.clientSet, name) -} - -func (c *clusterObj) ImageRegistry(namespace string) *imageRegistryObj { - return NewImageRegistry(c.clientSet, c.cfg, namespace) -} - -func (c *clusterObj) ObjectStorage(namespace string) *objectStorageObj { - return NewObjectStorage(c.clientSet, c.cfg, namespace) +func (c *ClusterObject) Namespace(name string) *NamespaceObject { + return NewNamespace(c.clientSet, c.cfg, name) } -func (c *clusterObj) PodInterceptor(namespace string) *podInterceptorObj { - return NewPodInterceptor(c.clientSet, c.cfg, namespace) +func (c *ClusterObject) Host() string { + return c.cfg.Host } -func (c *clusterObj) OperatingSystem() string { +func (c *ClusterObject) OperatingSystem() string { return strings.Split(c.versionInfo.Platform, "/")[0] } -func (c *clusterObj) Architecture() string { +func (c *ClusterObject) Architecture() string { return strings.Split(c.versionInfo.Platform, "/")[1] } diff --git 
a/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go b/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go new file mode 100644 index 0000000000..dc47f89150 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go @@ -0,0 +1,246 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + testworkflowsv1 "github.com/kubeshop/testkube-operator/api/testworkflows/v1" + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/testworkflows/executionworker/controller/store" +) + +type CRDSyncWorkflow struct { + Workflow testworkflowsv1.TestWorkflow + SourcePath string +} + +type CRDSyncTemplate struct { + Template testworkflowsv1.TestWorkflowTemplate + SourcePath string +} + +type CRDSyncUpdateOp string + +const ( + CRDSyncUpdateOpCreate CRDSyncUpdateOp = "create" + CRDSyncUpdateOpUpdate CRDSyncUpdateOp = "update" + CRDSyncUpdateOpDelete CRDSyncUpdateOp = "delete" +) + +type CRDSyncUpdate struct { + Template *testworkflowsv1.TestWorkflowTemplate + Workflow *testworkflowsv1.TestWorkflow + Op CRDSyncUpdateOp +} + +type CRDSync struct { + workflows []CRDSyncWorkflow + templates []CRDSyncTemplate + updates []CRDSyncUpdate + mu sync.Mutex + emitter store.Update +} + +// TODO: optimize for duplicates +func NewCRDSync() *CRDSync { + return &CRDSync{ + workflows: make([]CRDSyncWorkflow, 0), + templates: make([]CRDSyncTemplate, 0), + updates: make([]CRDSyncUpdate, 0), + emitter: store.NewUpdate(), + } +} + +func (c *CRDSync) WorkflowsCount() int { + c.mu.Lock() 
+	defer c.mu.Unlock()
+	return len(c.workflows)
+}
+
+func (c *CRDSync) TemplatesCount() int {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return len(c.templates)
+}
+
+func (c *CRDSync) Next(ctx context.Context) (*CRDSyncUpdate, error) {
+	for {
+		if ctx.Err() != nil {
+			return nil, ctx.Err()
+		}
+		c.mu.Lock()
+		if len(c.updates) > 0 {
+			next := c.updates[0]
+			c.updates = c.updates[1:]
+			c.mu.Unlock()
+			return &next, nil
+		}
+		ch := c.emitter.Next()
+		c.mu.Unlock()
+		select {
+		case <-ctx.Done():
+		case <-ch:
+		}
+	}
+}
+
+func (c *CRDSync) processWorkflow(sourcePath string, workflow testworkflowsv1.TestWorkflow) error {
+	for i := range c.workflows {
+		if c.workflows[i].Workflow.Name == workflow.Name {
+			v1, _ := json.Marshal(c.workflows[i].Workflow)
+			v2, _ := json.Marshal(workflow)
+			c.workflows[i].SourcePath = sourcePath
+			if !bytes.Equal(v1, v2) {
+				c.workflows[i].Workflow = workflow
+				c.updates = append(c.updates, CRDSyncUpdate{Workflow: &workflow, Op: CRDSyncUpdateOpUpdate})
+			}
+			return nil
+		}
+	}
+	c.workflows = append(c.workflows, CRDSyncWorkflow{SourcePath: sourcePath, Workflow: workflow})
+	c.updates = append(c.updates, CRDSyncUpdate{Workflow: &workflow, Op: CRDSyncUpdateOpCreate})
+	return nil
+}
+
+func (c *CRDSync) processTemplate(sourcePath string, template testworkflowsv1.TestWorkflowTemplate) error {
+	for i := range c.templates {
+		if c.templates[i].Template.Name == template.Name {
+			v1, _ := json.Marshal(c.templates[i].Template)
+			v2, _ := json.Marshal(template)
+			if !bytes.Equal(v1, v2) {
+				c.templates[i].SourcePath = sourcePath
+				c.templates[i].Template = template
+				c.updates = append(c.updates, CRDSyncUpdate{Template: &template, Op: CRDSyncUpdateOpUpdate})
+			}
+			return nil
+		}
+	}
+	c.templates = append(c.templates, CRDSyncTemplate{SourcePath: sourcePath, Template: template})
+	c.updates = append(c.updates, CRDSyncUpdate{Template: &template, Op: CRDSyncUpdateOpCreate})
+	return nil
+}
+
+func (c *CRDSync) deleteFile(path string) error {
+	for i 
:= 0; i < len(c.templates); i++ {
+		if c.templates[i].SourcePath == path {
+			c.updates = append(c.updates, CRDSyncUpdate{
+				Template: &testworkflowsv1.TestWorkflowTemplate{ObjectMeta: metav1.ObjectMeta{Name: c.templates[i].Template.Name}},
+				Op:       CRDSyncUpdateOpDelete,
+			})
+			c.templates = append(c.templates[:i], c.templates[i+1:]...)
+			i--
+		}
+	}
+	for i := 0; i < len(c.workflows); i++ {
+		if c.workflows[i].SourcePath == path {
+			c.updates = append(c.updates, CRDSyncUpdate{
+				Workflow: &testworkflowsv1.TestWorkflow{ObjectMeta: metav1.ObjectMeta{Name: c.workflows[i].Workflow.Name}},
+				Op:       CRDSyncUpdateOpDelete,
+			})
+			c.workflows = append(c.workflows[:i], c.workflows[i+1:]...)
+			i--
+		}
+	}
+	return nil
+}
+
+func (c *CRDSync) loadFile(path string) error {
+	// Ignore non-YAML files
+	if !strings.HasSuffix(path, ".yml") && !strings.HasSuffix(path, ".yaml") {
+		return nil
+	}
+
+	// Parse the YAML file
+	file, err := os.Open(path)
+	if err != nil {
+		c.deleteFile(path)
+		return nil
+	}
+
+	decoder := yaml.NewDecoder(file)
+	for {
+		var obj map[string]interface{}
+		err := decoder.Decode(&obj)
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			break
+		}
+
+		if obj["kind"] == nil || !(obj["kind"].(string) == "TestWorkflow" || obj["kind"].(string) == "TestWorkflowTemplate") {
+			continue
+		}
+
+		if obj["kind"].(string) == "TestWorkflow" {
+			bytes, _ := yaml.Marshal(obj)
+			tw := testworkflowsv1.TestWorkflow{}
+			err := common.DeserializeCRD(&tw, bytes)
+			if tw.Name == "" {
+				continue
+			}
+			if err != nil {
+				continue
+			}
+			c.processWorkflow(path, tw)
+		} else if obj["kind"].(string) == "TestWorkflowTemplate" {
+			bytes, _ := yaml.Marshal(obj)
+			tw := testworkflowsv1.TestWorkflowTemplate{}
+			err := common.DeserializeCRD(&tw, bytes)
+			if tw.Name == "" {
+				continue
+			}
+			if err != nil {
+				continue
+			}
+			c.processTemplate(path, tw)
+		}
+	}
+	file.Close()
+	return nil
+}
+
+func (c *CRDSync) Load(path string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	
path, err := filepath.Abs(path)
+	if err != nil {
+		return err
+	}
+
+	stat, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	if !stat.IsDir() {
+		return c.loadFile(path)
+	}
+
+	return filepath.Walk(path, func(path string, info fs.FileInfo, err error) error {
+		if err != nil || info == nil || info.IsDir() {
+			return nil
+		}
+		return c.loadFile(path)
+	})
+}
diff --git a/cmd/tcl/kubectl-testkube/devbox/walker.go b/cmd/tcl/kubectl-testkube/devbox/devutils/find.go
similarity index 60%
rename from cmd/tcl/kubectl-testkube/devbox/walker.go
rename to cmd/tcl/kubectl-testkube/devbox/devutils/find.go
index 27674be0b5..14804be6a6 100644
--- a/cmd/tcl/kubectl-testkube/devbox/walker.go
+++ b/cmd/tcl/kubectl-testkube/devbox/devutils/find.go
@@ -6,25 +6,29 @@
 //
 // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt
 
-package devbox
+package devutils
 
 import (
 	"os"
 	"path/filepath"
 )
 
-func findFile(path string) string {
+func FindDirContaining(paths ...string) string {
 	cwd, _ := os.Getwd()
 
 	// Find near in the tree
-	current := filepath.Clean(filepath.Join(cwd, "testkube"))
+	current := filepath.Clean(filepath.Join(cwd, "testkube", "dummy"))
+loop:
 	for current != filepath.Clean(filepath.Join(cwd, "..")) {
-		expected := filepath.Clean(filepath.Join(current, path))
-		_, err := os.Stat(expected)
-		if err == nil {
-			return expected
-		}
 		current = filepath.Dir(current)
+		for _, path := range paths {
+			expected := filepath.Clean(filepath.Join(current, path))
+			_, err := os.Stat(expected)
+			if err != nil {
+				continue loop
+			}
+		}
+		return current
 	}
 	return ""
 }
diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go b/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go
new file mode 100644
index 0000000000..9f3a346920
--- /dev/null
+++ b/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go
@@ -0,0 +1,173 @@
+// Copyright 2024 Testkube.
+//
+// Licensed as a Testkube Pro file under the Testkube Community
+// License (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt
+
+package devutils
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+)
+
+func GetFreePort() int {
+	var a *net.TCPAddr
+	var err error
+	if a, err = net.ResolveTCPAddr("tcp", ":0"); err == nil {
+		var l *net.TCPListener
+		if l, err = net.ListenTCP("tcp", a); err == nil {
+			defer l.Close()
+			return l.Addr().(*net.TCPAddr).Port
+		}
+	}
+	panic(err)
+}
+
+// TODO: Support context
+func ForwardPod(config *rest.Config, namespace, podName string, clusterPort, localPort int, ping bool) error {
+	middlewarePort := GetFreePort()
+	transport, upgrader, err := spdy.RoundTripperFor(config)
+	if err != nil {
+		return err
+	}
+	path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName)
+	hostIP := strings.TrimPrefix(config.Host, "https://")
+	serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &serverURL)
+	stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1)
+	out, errOut := new(bytes.Buffer), new(bytes.Buffer)
+	forwarder, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", middlewarePort, clusterPort)}, stopChan, readyChan, out, errOut)
+	if err != nil {
+		return err
+	}
+	go func() {
+		for {
+			if err = forwarder.ForwardPorts(); err != nil {
+				fmt.Println(errors.Wrap(err, "warn: forwarder: closed"))
+				time.Sleep(50 * time.Millisecond)
+				readyChan = make(chan struct{}, 1)
+				forwarder, err = portforward.New(dialer, []string{fmt.Sprintf("%d:%d", 
middlewarePort, clusterPort)}, stopChan, readyChan, out, errOut) + go func(readyChan chan struct{}) { + <-readyChan + fmt.Println("forwarder: reconnected") + }(readyChan) + } + } + }() + + // Hack to handle Kubernetes Port Forwarding issue. + // Stream through a different server, to ensure that both connections are fully read, with no broken pipe. + // @see {@link https://github.com/kubernetes/kubernetes/issues/74551} + ln, err := net.Listen("tcp", fmt.Sprintf(":%d", localPort)) + if err != nil { + return err + } + go func() { + defer ln.Close() + for { + conn, err := ln.Accept() + if err == nil { + go func(conn net.Conn) { + defer conn.Close() + open, err := net.Dial("tcp", fmt.Sprintf(":%d", middlewarePort)) + if err != nil { + return + } + defer open.Close() + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(open, conn) + wg.Done() + }() + go func() { + io.Copy(conn, open) + wg.Done() + }() + wg.Wait() + + // Read all before closing + io.ReadAll(conn) + io.ReadAll(open) + }(conn) + } + } + }() + + if ping { + go func() { + for { + http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d", localPort), nil) + time.Sleep(4 * time.Second) + } + }() + } + + return nil +} + +func ProxySSL(sourcePort, sslPort int) error { + set, err := CreateCertificate(x509.Certificate{ + IPAddresses: []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("127.0.0.1"), net.IPv6loopback}, + }) + if err != nil { + return err + } + crt, err := tls.X509KeyPair(set.CrtPEM, set.KeyPEM) + if err != nil { + return err + } + ln, err := tls.Listen("tcp", fmt.Sprintf(":%d", sslPort), &tls.Config{ + Certificates: []tls.Certificate{crt}, + InsecureSkipVerify: true, + }) + if err != nil { + return err + } + go func() { + defer ln.Close() + + for { + conn, err := ln.Accept() + if err == nil { + go func(conn net.Conn) { + defer conn.Close() + open, err := net.Dial("tcp", fmt.Sprintf(":%d", sourcePort)) + if err != nil { + return + } + defer open.Close() + var wg sync.WaitGroup + wg.Add(2) + go 
func() { + io.Copy(open, conn) + wg.Done() + }() + go func() { + io.Copy(conn, open) + wg.Done() + }() + wg.Wait() + }(conn) + } + } + }() + return nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/fswatcher.go b/cmd/tcl/kubectl-testkube/devbox/devutils/fswatcher.go new file mode 100644 index 0000000000..c12a38e885 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/fswatcher.go @@ -0,0 +1,106 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "context" + "io" + "io/fs" + "os" + "path/filepath" + + "github.com/fsnotify/fsnotify" +) + +type FsWatcher struct { + watcher *fsnotify.Watcher +} + +// TODO: support masks like **/*.go +func NewFsWatcher(paths ...string) (*FsWatcher, error) { + fsWatcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + w := &FsWatcher{ + watcher: fsWatcher, + } + for i := range paths { + if err = w.add(paths[i]); err != nil { + fsWatcher.Close() + return nil, err + } + } + return w, nil +} + +func (w *FsWatcher) Close() error { + return w.watcher.Close() +} + +func (w *FsWatcher) addRecursive(dirPath string) error { + if err := w.watcher.Add(dirPath); err != nil { + return err + } + return filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error { + if err != nil || !d.IsDir() { + return nil + } + if filepath.Base(path)[0] == '.' 
{ + // Ignore dot-files + return nil + } + if path == dirPath { + return nil + } + return w.addRecursive(path) + }) +} + +func (w *FsWatcher) add(path string) error { + path, err := filepath.Abs(path) + if err != nil { + return err + } + return w.addRecursive(path) +} + +func (w *FsWatcher) Next(ctx context.Context) (string, error) { + for { + select { + case <-ctx.Done(): + return "", ctx.Err() + case event, ok := <-w.watcher.Events: + if !ok { + return "", io.EOF + } + fileinfo, err := os.Stat(event.Name) + if err != nil { + continue + } + if fileinfo.IsDir() { + if event.Has(fsnotify.Create) { + if err = w.addRecursive(event.Name); err != nil { + return "", err + } + } + continue + } + if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) && !event.Has(fsnotify.Remove) { + continue + } + return event.Name, nil + case err, ok := <-w.watcher.Errors: + if !ok { + return "", io.EOF + } + return "", err + } + } +} diff --git a/cmd/tcl/kubectl-testkube/devbox/podinterceptor.go b/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go similarity index 53% rename from cmd/tcl/kubectl-testkube/devbox/podinterceptor.go rename to cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go index 7cd1cc18a9..c478c663ad 100644 --- a/cmd/tcl/kubectl-testkube/devbox/podinterceptor.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go @@ -6,27 +6,24 @@ // // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt -package devbox +package devutils import ( "context" "crypto/x509" - "errors" "fmt" "io" "os" "sync" - "time" "github.com/kballard/go-shellquote" errors2 "github.com/pkg/errors" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" 
"github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" @@ -34,69 +31,80 @@ import ( "github.com/kubeshop/testkube/pkg/testworkflows/testworkflowprocessor/constants" ) -type podInterceptorObj struct { - clientSet *kubernetes.Clientset - kubernetesConfig *rest.Config - namespace string - pod *corev1.Pod - caPem []byte - localPort int - localSslPort int +type Interceptor struct { + pod *PodObject + caPem []byte + initProcessImageName string + toolkitImageName string + binary *Binary } -func NewPodInterceptor(clientSet *kubernetes.Clientset, kubernetesConfig *rest.Config, namespace string) *podInterceptorObj { - return &podInterceptorObj{ - clientSet: clientSet, - namespace: namespace, - kubernetesConfig: kubernetesConfig, +func NewInterceptor(pod *PodObject, initProcessImageName, toolkitImageName string, binary *Binary) *Interceptor { + return &Interceptor{ + pod: pod, + initProcessImageName: initProcessImageName, + toolkitImageName: toolkitImageName, + binary: binary, } } -func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) (err error) { - caPem, certPem, keyPem, err := CreateCertificate(x509.Certificate{ +func (r *Interceptor) Create(ctx context.Context) error { + if r.binary.Hash() == "" { + return errors2.New("interceptor binary is not built") + } + + certSet, err := CreateCertificate(x509.Certificate{ DNSNames: []string{ - fmt.Sprintf("devbox-interceptor.%s", r.namespace), - fmt.Sprintf("devbox-interceptor.%s.svc", r.namespace), + fmt.Sprintf("%s.%s", r.pod.Name(), r.pod.Namespace()), + fmt.Sprintf("%s.%s.svc", r.pod.Name(), r.pod.Namespace()), }, }) if err != nil { return err } - r.caPem = caPem // Deploy certificate - _, err = r.clientSet.CoreV1().Secrets(r.namespace).Create(context.Background(), &corev1.Secret{ + certSecretName := fmt.Sprintf("%s-cert", r.pod.Name()) + _, err = r.pod.ClientSet().CoreV1().Secrets(r.pod.Namespace()).Create(ctx, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-interceptor-cert", 
+ Name: certSecretName, }, Data: map[string][]byte{ - "ca.crt": caPem, - "tls.crt": certPem, - "tls.key": keyPem, + "ca.crt": certSet.CaPEM, + "tls.crt": certSet.CrtPEM, + "tls.key": certSet.KeyPEM, }, }, metav1.CreateOptions{}) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + secret, err := r.pod.ClientSet().CoreV1().Secrets(r.pod.Namespace()).Get(ctx, certSecretName, metav1.GetOptions{}) + if err != nil { + return err + } + certSet.CaPEM = secret.Data["ca.crt"] + certSet.CrtPEM = secret.Data["tls.crt"] + certSet.KeyPEM = secret.Data["tls.key"] + } + r.caPem = certSet.CaPEM // Deploy Pod - r.pod, err = r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-interceptor", - Labels: map[string]string{ - "testkube.io/devbox": "interceptor", - }, - }, + err = r.pod.Create(ctx, &corev1.Pod{ Spec: corev1.PodSpec{ TerminationGracePeriodSeconds: common.Ptr(int64(1)), Volumes: []corev1.Volume{ {Name: "server", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, {Name: "certs", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{ - SecretName: "devbox-interceptor-cert", + SecretName: certSecretName, }}}, }, Containers: []corev1.Container{ { - Name: "interceptor", - Image: "busybox:1.36.1-musl", - Command: []string{"/bin/sh", "-c", fmt.Sprintf("while [ ! -f /app/server-ready ]; do sleep 1; done\n/app/server %s", shellquote.Join(initImage, toolkitImage))}, + Name: "interceptor", + Image: "busybox:1.36.1-musl", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sh", "-c", fmt.Sprintf("while [ ! 
-f /app/server-ready ]; do sleep 1; done\n/app/server %s", shellquote.Join(r.initProcessImageName, r.toolkitImageName))}, VolumeMounts: []corev1.VolumeMount{ {Name: "server", MountPath: "/app"}, {Name: "certs", MountPath: "/certs"}, @@ -114,44 +122,32 @@ func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) ( }, }, }, - }, metav1.CreateOptions{}) + }) + + // Wait for the container to be started + err = r.pod.WaitForContainerStarted(ctx) if err != nil { - return + return err } - // Create the service - _, err = r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-interceptor", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: map[string]string{ - "testkube.io/devbox": "interceptor", - }, - Ports: []corev1.ServicePort{ - { - Name: "api", - Protocol: "TCP", - Port: 8443, - TargetPort: intstr.FromInt32(8443), - }, - }, - }, - }, metav1.CreateOptions{}) - - // Wait for the container to be started - err = r.WaitForContainerStarted() + // Deploy Service + err = r.pod.CreateService(ctx, corev1.ServicePort{ + Name: "api", + Protocol: "TCP", + Port: 8443, + TargetPort: intstr.FromInt32(8443), + }) if err != nil { - return + return err } + // TODO: Move transfer utilities to *PodObject // Apply the binary - req := r.clientSet.CoreV1().RESTClient(). + req := r.pod.ClientSet().CoreV1().RESTClient(). Post(). Resource("pods"). - Name(r.pod.Name). - Namespace(r.namespace). + Name(r.pod.Name()). + Namespace(r.pod.Namespace()). SubResource("exec"). 
VersionedParams(&corev1.PodExecOptions{ Container: "interceptor", @@ -162,7 +158,7 @@ func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) ( TTY: false, }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(r.kubernetesConfig, "POST", req.URL()) + exec, err := remotecommand.NewSPDYExecutor(r.pod.RESTConfig(), "POST", req.URL()) if err != nil { return errors2.Wrap(err, "failed to create spdy executor") } @@ -175,17 +171,17 @@ func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) ( defer flagFile.Close() flagFileStat, err := flagFile.Stat() if err != nil { - return + return err } - file, err := os.Open(binaryPath) + file, err := os.Open(r.binary.Path()) if err != nil { - return + return err } defer file.Close() fileStat, err := file.Stat() if err != nil { - return + return err } tarStream := artifacts.NewTarStream() @@ -213,65 +209,34 @@ func (r *podInterceptorObj) Deploy(binaryPath, initImage, toolkitImage string) ( writer.Close() bufMu.Lock() defer bufMu.Unlock() - return fmt.Errorf("failed to stream: %s: %s", err.Error(), string(buf)) + return fmt.Errorf("failed to stream binary: %s: %s", err.Error(), string(buf)) } writer.Close() - return -} - -func (r *podInterceptorObj) WaitForContainerStarted() (err error) { - for { - if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Started != nil && *r.pod.Status.ContainerStatuses[0].Started { - return nil - } - time.Sleep(500 * time.Millisecond) - pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "testkube.io/devbox=interceptor", - }) - if err != nil { - return err - } - if len(pods.Items) == 0 { - return errors.New("pod not found") - } - r.pod = &pods.Items[0] - } + return nil } -func (r *podInterceptorObj) WaitForReady() (err error) { - for { - if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready 
{ - return nil - } - time.Sleep(500 * time.Millisecond) - pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "testkube.io/devbox=interceptor", - }) - if err != nil { - return err - } - if len(pods.Items) == 0 { - return errors.New("pod not found") - } - r.pod = &pods.Items[0] - } +func (r *Interceptor) WaitForReady(ctx context.Context) error { + return r.pod.WaitForReady(ctx) } -func (r *podInterceptorObj) Enable() (err error) { +func (r *Interceptor) Enable(ctx context.Context) error { _ = r.Disable() - _, err = r.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.Background(), &admissionregistrationv1.MutatingWebhookConfiguration{ + _, err := r.pod.ClientSet().AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionregistrationv1.MutatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("devbox-interceptor-webhook-%s", r.namespace), + Name: fmt.Sprintf("%s-webhook-%s", r.pod.Name(), r.pod.Namespace()), + Labels: map[string]string{ + "testkube.io/devbox-name": r.pod.Namespace(), + }, }, Webhooks: []admissionregistrationv1.MutatingWebhook{ { Name: "devbox.kb.io", ClientConfig: admissionregistrationv1.WebhookClientConfig{ Service: &admissionregistrationv1.ServiceReference{ - Name: "devbox-interceptor", - Namespace: r.namespace, + Name: r.pod.Name(), + Namespace: r.pod.Namespace(), Path: common.Ptr("/mutate"), Port: common.Ptr(int32(8443)), }, @@ -295,7 +260,7 @@ func (r *podInterceptorObj) Enable() (err error) { { Key: "kubernetes.io/metadata.name", Operator: metav1.LabelSelectorOpIn, - Values: []string{r.namespace}, + Values: []string{r.pod.Namespace()}, }, }, }, @@ -312,19 +277,12 @@ func (r *podInterceptorObj) Enable() (err error) { }, }, }, metav1.CreateOptions{}) - return + return err } -func (r *podInterceptorObj) Disable() (err error) { - return 
r.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete( +func (r *Interceptor) Disable() (err error) { + return r.pod.ClientSet().AdmissionregistrationV1().MutatingWebhookConfigurations().Delete( context.Background(), - fmt.Sprintf("devbox-interceptor-webhook-%s", r.namespace), + fmt.Sprintf("%s-webhook-%s", r.pod.Name(), r.pod.Namespace()), metav1.DeleteOptions{}) } - -func (r *podInterceptorObj) IP() string { - if r.pod == nil { - return "" - } - return r.pod.Status.PodIP -} diff --git a/cmd/tcl/kubectl-testkube/devbox/namespace.go b/cmd/tcl/kubectl-testkube/devbox/devutils/namespace.go similarity index 51% rename from cmd/tcl/kubectl-testkube/devbox/namespace.go rename to cmd/tcl/kubectl-testkube/devbox/devutils/namespace.go index 70673859c7..8f0845ff4a 100644 --- a/cmd/tcl/kubectl-testkube/devbox/namespace.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/namespace.go @@ -6,10 +6,11 @@ // // https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt -package devbox +package devutils import ( "context" + "fmt" "strings" "time" @@ -19,37 +20,47 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "github.com/kubeshop/testkube/internal/common" ) -type namespaceObj struct { - clientSet kubernetes.Interface - namespace string - ns *corev1.Namespace +var ( + ErrNotDevboxNamespace = errors.New("selected namespace exists and is not devbox") +) + +type NamespaceObject struct { + name string + clientSet *kubernetes.Clientset + restConfig *rest.Config + namespace *corev1.Namespace } -func NewNamespace(clientSet kubernetes.Interface, namespace string) *namespaceObj { - return &namespaceObj{ - clientSet: clientSet, - namespace: namespace, +func NewNamespace(kubeClient *kubernetes.Clientset, kubeRestConfig *rest.Config, name string) *NamespaceObject { + return &NamespaceObject{ + name: name, + clientSet: kubeClient, + restConfig: 
kubeRestConfig, } } -func (n *namespaceObj) ServiceAccountName() string { +func (n *NamespaceObject) Name() string { + return n.name +} + +func (n *NamespaceObject) ServiceAccountName() string { return "devbox-account" } -func (n *namespaceObj) Create() (err error) { - if n.ns != nil { - return nil - } +func (n *NamespaceObject) Pod(name string) *PodObject { + return NewPod(n.clientSet, n.restConfig, n.name, name) +} - // Create namespace +func (n *NamespaceObject) create() error { for { - n.ns, err = n.clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ + namespace, err := n.clientSet.CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: n.namespace, + Name: n.name, Labels: map[string]string{ "testkube.io/devbox": "namespace", }, @@ -60,21 +71,41 @@ func (n *namespaceObj) Create() (err error) { time.Sleep(200 * time.Millisecond) continue } + if k8serrors.IsAlreadyExists(err) { + namespace, err = n.clientSet.CoreV1().Namespaces().Get(context.Background(), n.name, metav1.GetOptions{}) + if err != nil { + return err + } + if namespace.Labels["testkube.io/devbox"] != "namespace" { + return ErrNotDevboxNamespace + } + err = n.clientSet.CoreV1().Namespaces().Delete(context.Background(), n.name, metav1.DeleteOptions{ + GracePeriodSeconds: common.Ptr(int64(0)), + PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), + }) + if err != nil { + return err + } + continue + } return errors.Wrap(err, "failed to create namespace") } - break + n.namespace = namespace + return nil } +} +func (n *NamespaceObject) createServiceAccount() error { // Create service account - serviceAccount, err := n.clientSet.CoreV1().ServiceAccounts(n.namespace).Create(context.Background(), &corev1.ServiceAccount{ + serviceAccount, err := n.clientSet.CoreV1().ServiceAccounts(n.name).Create(context.Background(), &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Name: n.ServiceAccountName()}, }, 
metav1.CreateOptions{}) - if err != nil { + if err != nil && !k8serrors.IsAlreadyExists(err) { return errors.Wrap(err, "failed to create service account") } // Create service account role - role, err := n.clientSet.RbacV1().Roles(n.namespace).Create(context.Background(), &rbacv1.Role{ + role, err := n.clientSet.RbacV1().Roles(n.name).Create(context.Background(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: "devbox-account-role", }, @@ -101,18 +132,18 @@ func (n *namespaceObj) Create() (err error) { }, }, }, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, "failed to create role binding") + if err != nil && !k8serrors.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to create roles") } // Create service account role binding - _, err = n.clientSet.RbacV1().RoleBindings(n.namespace).Create(context.Background(), &rbacv1.RoleBinding{ + _, err = n.clientSet.RbacV1().RoleBindings(n.name).Create(context.Background(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{Name: "devbox-account-rb"}, Subjects: []rbacv1.Subject{ { Kind: "ServiceAccount", Name: serviceAccount.Name, - Namespace: n.namespace, + Namespace: n.name, }, }, RoleRef: rbacv1.RoleRef{ @@ -121,17 +152,36 @@ func (n *namespaceObj) Create() (err error) { Name: role.Name, }, }, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to create role bindings") + } + return nil +} + +func (n *NamespaceObject) Create() error { + if n.namespace != nil { + return nil + } + if err := n.create(); err != nil { + return err + } + if err := n.createServiceAccount(); err != nil { + return err + } return nil } -func (n *namespaceObj) Destroy() error { - err := n.clientSet.CoreV1().Namespaces().Delete(context.Background(), n.namespace, metav1.DeleteOptions{ +func (n *NamespaceObject) Destroy() error { + err := n.clientSet.CoreV1().Namespaces().Delete(context.Background(), n.name, metav1.DeleteOptions{ GracePeriodSeconds: 
common.Ptr(int64(0)), PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), }) - if k8serrors.IsNotFound(err) { - return nil + if err != nil && !k8serrors.IsNotFound(err) { + return err } + err = n.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("testkube.io/devbox-name=%s", n.name), + }) return err } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go new file mode 100644 index 0000000000..77a1f62354 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go @@ -0,0 +1,193 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "sync" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + minio2 "github.com/minio/minio-go/v7" + + "github.com/kubeshop/testkube/internal/common" + "github.com/kubeshop/testkube/pkg/log" + "github.com/kubeshop/testkube/pkg/storage/minio" +) + +type ObjectStorage struct { + pod *PodObject + localPort int + hashes map[string]string + hashMu sync.RWMutex + cachedClient *minio2.Client + cachedClientMu sync.Mutex +} + +func NewObjectStorage(pod *PodObject) *ObjectStorage { + return &ObjectStorage{ + pod: pod, + hashes: make(map[string]string), + } +} + +func (r *ObjectStorage) Is(path string, hash string) bool { + r.hashMu.RLock() + defer r.hashMu.RUnlock() + return r.hashes[path] == hash +} + +func (r *ObjectStorage) SetHash(path string, hash string) { + r.hashMu.Lock() + defer r.hashMu.Unlock() + r.hashes[path] = hash +} + 
+func (r *ObjectStorage) Create(ctx context.Context) error { + err := r.pod.Create(ctx, &corev1.Pod{ + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: common.Ptr(int64(1)), + Containers: []corev1.Container{ + { + Name: "minio", + Image: "minio/minio:RELEASE.2024-10-13T13-34-11Z", + ImagePullPolicy: corev1.PullIfNotPresent, + Args: []string{"server", "/data", "--console-address", ":9090"}, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + TCPSocket: &corev1.TCPSocketAction{ + Port: intstr.FromInt32(9000), + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }) + if err != nil { + return err + } + err = r.pod.CreateService(ctx, corev1.ServicePort{ + Name: "api", + Protocol: "TCP", + Port: 9000, + TargetPort: intstr.FromInt32(9000), + }) + if err != nil { + return err + } + + err = r.pod.WaitForContainerStarted(ctx) + if err != nil { + return err + } + + r.localPort = GetFreePort() + err = r.pod.Forward(ctx, 9000, r.localPort, true) + if err != nil { + fmt.Println("Forward error") + return err + } + + c, err := r.Client() + if err != nil { + fmt.Println("Creating client") + return err + } + + // Handle a case when port forwarder is not ready + for i := 0; i < 10; i++ { + makeBucketCtx, ctxCancel := context.WithTimeout(ctx, 2*time.Second) + err = c.MakeBucket(makeBucketCtx, "devbox", minio2.MakeBucketOptions{}) + if err == nil { + ctxCancel() + return nil + } + if ctx.Err() != nil { + ctxCancel() + return ctx.Err() + } + ctxCancel() + } + return nil +} + +func (r *ObjectStorage) Client() (*minio2.Client, error) { + r.cachedClientMu.Lock() + defer r.cachedClientMu.Unlock() + if r.cachedClient != nil { + return r.cachedClient, nil + } + connecter := minio.NewConnecter( + fmt.Sprintf("localhost:%d", r.localPort), + "minioadmin", + "minioadmin", + "", + "", + "devbox", + log.DefaultLogger, + ) + cl, err := connecter.GetClient() + if err != nil { + return nil, err + } + r.cachedClient = cl + return cl, nil +} + +func (r *ObjectStorage) 
WaitForReady(ctx context.Context) error { + return r.pod.WaitForReady(ctx) +} + +// TODO: Compress on-fly +func (r *ObjectStorage) Upload(ctx context.Context, path string, reader io.Reader, hash string) error { + c, err := r.Client() + if err != nil { + return err + } + if hash != "" && r.Is(path, hash) { + return nil + } + putUrl, err := c.PresignedPutObject(ctx, "devbox", path, 15*time.Minute) + if err != nil { + return err + } + buf := new(bytes.Buffer) + //g := gzip.NewWriter(buf) + io.Copy(buf, reader) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, putUrl.String(), buf) + if err != nil { + return err + } + req.ContentLength = int64(buf.Len()) + + req.Header.Set("Content-Type", "application/octet-stream") + //req.Header.Set("Content-Encoding", "gzip") + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: tr} + res, err := client.Do(req) + if err != nil { + return err + } + if res.StatusCode != http.StatusOK { + b, _ := io.ReadAll(res.Body) + return fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) + } + r.SetHash(path, hash) + return nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go new file mode 100644 index 0000000000..91af26eace --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go @@ -0,0 +1,228 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "context" + "fmt" + "sync" + "time" + + errors2 "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/kubeshop/testkube/internal/common" +) + +var ( + ErrPodNotFound = errors2.New("pod not found") +) + +type PodObject struct { + name string + namespace string + pod *corev1.Pod + service *corev1.Service + clientSet *kubernetes.Clientset + restConfig *rest.Config + + mu sync.Mutex +} + +func NewPod(kubeClient *kubernetes.Clientset, kubeRestConfig *rest.Config, namespace, name string) *PodObject { + return &PodObject{ + name: name, + namespace: namespace, + clientSet: kubeClient, + restConfig: kubeRestConfig, + } +} + +func (p *PodObject) Name() string { + return p.name +} + +func (p *PodObject) Namespace() string { + return p.namespace +} + +func (p *PodObject) Selector() metav1.LabelSelector { + return metav1.LabelSelector{ + MatchLabels: map[string]string{ + "testkube.io/devbox": p.name, + }, + } +} + +func (p *PodObject) ClientSet() *kubernetes.Clientset { + return p.clientSet +} + +func (p *PodObject) RESTConfig() *rest.Config { + return p.restConfig +} + +func (p *PodObject) Create(ctx context.Context, request *corev1.Pod) error { + p.mu.Lock() + defer p.mu.Unlock() + if p.pod != nil { + return nil + } + return p.create(ctx, request) +} + +func (p *PodObject) create(ctx context.Context, request *corev1.Pod) error { + request = request.DeepCopy() + request.Name = p.name + request.Namespace = p.namespace + request.ResourceVersion = "" + if len(request.Labels) == 0 { + request.Labels = make(map[string]string) + } + request.Labels["testkube.io/devbox"] = p.name + + pod, err := p.clientSet.CoreV1().Pods(p.namespace).Create(ctx, request, metav1.CreateOptions{}) + if 
errors.IsAlreadyExists(err) { + err = p.clientSet.CoreV1().Pods(p.namespace).Delete(ctx, request.Name, metav1.DeleteOptions{ + GracePeriodSeconds: common.Ptr(int64(0)), + PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), + }) + if err != nil { + return errors2.Wrap(err, "failed to delete existing pod") + } + pod, err = p.clientSet.CoreV1().Pods(p.namespace).Create(context.Background(), request, metav1.CreateOptions{}) + } + if err != nil { + return errors2.Wrap(err, "failed to create pod") + } + p.pod = pod + return nil +} + +func (p *PodObject) Pod() *corev1.Pod { + return p.pod +} + +func (p *PodObject) RefreshData(ctx context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + pods, err := p.clientSet.CoreV1().Pods(p.namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("testkube.io/devbox=%s", p.name), + }) + if err != nil { + return err + } + if len(pods.Items) == 0 { + p.pod = nil + return ErrPodNotFound + } + p.pod = &pods.Items[0] + return nil +} + +func (p *PodObject) Watch(ctx context.Context) error { + panic("not implemented") +} + +func (p *PodObject) Restart(ctx context.Context) error { + p.mu.Lock() + defer p.mu.Unlock() + + pod := p.pod + if pod == nil { + return ErrPodNotFound + } + p.pod = nil + _ = p.clientSet.CoreV1().Pods(p.namespace).Delete(context.Background(), p.name, metav1.DeleteOptions{ + GracePeriodSeconds: common.Ptr(int64(0)), + PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), + }) + return p.create(context.Background(), pod) +} + +func (p *PodObject) WaitForReady(ctx context.Context) error { + for { + if p.pod != nil && len(p.pod.Status.ContainerStatuses) > 0 && p.pod.Status.ContainerStatuses[0].Ready { + return nil + } + time.Sleep(300 * time.Millisecond) + err := p.RefreshData(ctx) + if err != nil { + return err + } + } +} + +func (p *PodObject) WaitForContainerStarted(ctx context.Context) (err error) { + for { + if p.pod != nil && len(p.pod.Status.ContainerStatuses) > 0 && 
p.pod.Status.ContainerStatuses[0].Started != nil && *p.pod.Status.ContainerStatuses[0].Started { + return nil + } + time.Sleep(300 * time.Millisecond) + err := p.RefreshData(ctx) + if err != nil { + return err + } + } +} + +func (p *PodObject) ClusterIP() string { + if p.pod == nil { + return "" + } + return p.pod.Status.PodIP +} + +func (p *PodObject) ClusterAddress() string { + if p.service == nil { + return p.ClusterIP() + } + return fmt.Sprintf("%s.%s.svc", p.service.Name, p.service.Namespace) +} + +func (p *PodObject) CreateService(ctx context.Context, ports ...corev1.ServicePort) error { + request := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: p.name, + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + Selector: map[string]string{ + "testkube.io/devbox": p.name, + }, + Ports: ports, + }, + } + + svc, err := p.clientSet.CoreV1().Services(p.namespace).Create(ctx, request, metav1.CreateOptions{}) + if errors.IsAlreadyExists(err) { + err = p.clientSet.CoreV1().Services(p.namespace).Delete(ctx, request.Name, metav1.DeleteOptions{}) + if err != nil { + return errors2.Wrap(err, "failed to delete existing service") + } + svc, err = p.clientSet.CoreV1().Services(p.namespace).Create(ctx, request, metav1.CreateOptions{}) + } + if err != nil { + return err + } + p.service = svc + return nil +} + +func (p *PodObject) Forward(_ context.Context, clusterPort, localPort int, ping bool) error { + if p.pod == nil { + return ErrPodNotFound + } + return ForwardPod(p.restConfig, p.pod.Namespace, p.pod.Name, clusterPort, localPort, ping) +} diff --git a/cmd/tcl/kubectl-testkube/devbox/forward.go b/cmd/tcl/kubectl-testkube/devbox/forward.go deleted file mode 100644 index 3cc2ae6c6b..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/forward.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2024 Testkube. 
-// -// Licensed as a Testkube Pro file under the Testkube Community -// License (the "License"); you may not use this file except in compliance with -// the License. You may obtain a copy of the License at -// -// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt - -package devbox - -import ( - "bytes" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io" - "math/big" - "net" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/pkg/errors" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/portforward" - "k8s.io/client-go/transport/spdy" - - "github.com/kubeshop/testkube/pkg/ui" -) - -func GetFreePort() (port int, err error) { - var a *net.TCPAddr - if a, err = net.ResolveTCPAddr("tcp", "localhost:0"); err == nil { - var l *net.TCPListener - if l, err = net.ListenTCP("tcp", a); err == nil { - defer l.Close() - return l.Addr().(*net.TCPAddr).Port, nil - } - } - return -} - -func ForwardPodPort(config *rest.Config, namespace, podName string, clusterPort, localPort int) error { - middlewarePort, err := GetFreePort() - if err != nil { - return err - } - transport, upgrader, err := spdy.RoundTripperFor(config) - if err != nil { - return err - } - path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName) - hostIP := strings.TrimLeft(config.Host, "https://") - serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP} - dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &serverURL) - stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) - out, errOut := new(bytes.Buffer), new(bytes.Buffer) - forwarder, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", middlewarePort, clusterPort)}, stopChan, readyChan, out, errOut) - if err != nil { - return err - } - go func() { - if err = forwarder.ForwardPorts(); err != nil { - ui.Fail(errors.Wrap(err, "failed to forward ports")) - } - 
fmt.Println("finish forwarding ports") - }() - - // Hack to handle Kubernetes Port Forwarding issue. - // Stream through a different server, to ensure that both connections are fully read, with no broken pipe. - // @see {@link https://github.com/kubernetes/kubernetes/issues/74551} - ln, err := net.Listen("tcp", fmt.Sprintf(":%d", localPort)) - if err != nil { - return err - } - go func() { - defer ln.Close() - for { - conn, err := ln.Accept() - if err == nil { - go func(conn net.Conn) { - defer conn.Close() - open, err := net.Dial("tcp", fmt.Sprintf(":%d", middlewarePort)) - if err != nil { - return - } - defer open.Close() - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(open, conn) - wg.Done() - }() - go func() { - io.Copy(conn, open) - wg.Done() - }() - wg.Wait() - - // Read all before closing - io.ReadAll(conn) - io.ReadAll(open) - }(conn) - } - } - }() - - for range readyChan { - } - go func() { - for { - http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d", localPort), nil) - time.Sleep(1 * time.Second) - } - }() - - return nil -} - -func CreateCertificate(cert x509.Certificate) (rcaPEM, rcrtPEM, rkeyPEM []byte, err error) { - // Build CA - ca := &x509.Certificate{ - SerialNumber: big.NewInt(11111), - Subject: pkix.Name{ - Organization: []string{"Kubeshop"}, - Country: []string{"US"}, - Province: []string{""}, - Locality: []string{"Wilmington"}, - StreetAddress: []string{"Orange St"}, - PostalCode: []string{"19801"}, - }, - NotBefore: time.Now(), - NotAfter: time.Now().AddDate(10, 0, 0), - IsCA: true, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, - KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - BasicConstraintsValid: true, - } - caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - return nil, nil, nil, err - } - caBytes, err := x509.CreateCertificate(rand.Reader, ca, ca, &caPrivKey.PublicKey, caPrivKey) - if err != nil { - return nil, nil, nil, err - } 
- caPEM := new(bytes.Buffer) - pem.Encode(caPEM, &pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - }) - caPrivKeyPEM := new(bytes.Buffer) - pem.Encode(caPrivKeyPEM, &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(caPrivKey), - }) - - // Build the direct certificate - cert.NotBefore = ca.NotBefore - cert.NotAfter = ca.NotAfter - cert.SerialNumber = big.NewInt(11111) - cert.Subject = ca.Subject - cert.SubjectKeyId = []byte{1, 2, 3, 4, 6} - cert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth} - cert.KeyUsage = x509.KeyUsageDigitalSignature - - certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - return nil, nil, nil, err - } - certBytes, err := x509.CreateCertificate(rand.Reader, &cert, ca, &certPrivKey.PublicKey, caPrivKey) - if err != nil { - return nil, nil, nil, err - } - certPEM := new(bytes.Buffer) - pem.Encode(certPEM, &pem.Block{ - Type: "CERTIFICATE", - Bytes: certBytes, - }) - certPrivKeyPEM := new(bytes.Buffer) - pem.Encode(certPrivKeyPEM, &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey), - }) - - return caPEM.Bytes(), certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil -} - -func CreateSelfSignedCertificate(tml x509.Certificate) (tls.Certificate, []byte, []byte, error) { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return tls.Certificate{}, nil, nil, err - } - keyPem := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}) - tml.NotBefore = time.Now() - tml.NotAfter = time.Now().AddDate(5, 0, 0) - tml.SerialNumber = big.NewInt(123456) - tml.BasicConstraintsValid = true - cert, err := x509.CreateCertificate(rand.Reader, &tml, &tml, &key.PublicKey, key) - if err != nil { - return tls.Certificate{}, nil, nil, err - } - certPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert}) - tlsCert, err := tls.X509KeyPair(certPem, keyPem) - if err != nil { - 
return tls.Certificate{}, nil, nil, err - } - return tlsCert, certPem, keyPem, nil -} - -func ProxySSL(sourcePort, sslPort int) error { - tlsCert, _, _, err := CreateSelfSignedCertificate(x509.Certificate{ - IPAddresses: []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("127.0.0.1")}, - }) - if err != nil { - return err - } - ln, err := tls.Listen("tcp", fmt.Sprintf(":%d", sslPort), &tls.Config{ - Certificates: []tls.Certificate{tlsCert}, - InsecureSkipVerify: true, - }) - if err != nil { - return err - } - go func() { - defer ln.Close() - - for { - conn, err := ln.Accept() - if err == nil { - go func(conn net.Conn) { - defer conn.Close() - open, err := net.Dial("tcp", fmt.Sprintf(":%d", sourcePort)) - if err != nil { - return - } - defer open.Close() - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(open, conn) - wg.Done() - }() - go func() { - io.Copy(conn, open) - wg.Done() - }() - wg.Wait() - - io.ReadAll(conn) - io.ReadAll(open) - }(conn) - } - } - }() - return nil -} diff --git a/cmd/tcl/kubectl-testkube/devbox/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/objectstorage.go deleted file mode 100644 index 4d54232c2a..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/objectstorage.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2024 Testkube. -// -// Licensed as a Testkube Pro file under the Testkube Community -// License (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt - -package devbox - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - - "github.com/kubeshop/testkube/internal/common" - "github.com/kubeshop/testkube/pkg/storage/minio" -) - -type objectStorageObj struct { - clientSet kubernetes.Interface - kubernetesConfig *rest.Config - namespace string - pod *corev1.Pod - localPort int - localWebPort int -} - -func NewObjectStorage(clientSet kubernetes.Interface, kubernetesConfig *rest.Config, namespace string) *objectStorageObj { - return &objectStorageObj{ - clientSet: clientSet, - namespace: namespace, - kubernetesConfig: kubernetesConfig, - } -} - -func (r *objectStorageObj) Deploy() (err error) { - r.pod, err = r.clientSet.CoreV1().Pods(r.namespace).Create(context.Background(), &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-storage", - Labels: map[string]string{ - "testkube.io/devbox": "storage", - }, - }, - Spec: corev1.PodSpec{ - TerminationGracePeriodSeconds: common.Ptr(int64(1)), - Containers: []corev1.Container{ - { - Name: "minio", - Image: "minio/minio:RELEASE.2024-10-13T13-34-11Z", - Args: []string{"server", "/data", "--console-address", ":9090"}, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt32(9000), - }, - }, - PeriodSeconds: 1, - }, - }, - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return err - } - - // Create the service - _, err = r.clientSet.CoreV1().Services(r.namespace).Create(context.Background(), &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: "devbox-storage", - }, - Spec: corev1.ServiceSpec{ - Type: corev1.ServiceTypeClusterIP, - Selector: map[string]string{ - "testkube.io/devbox": "storage", - 
}, - Ports: []corev1.ServicePort{ - { - Name: "api", - Protocol: "TCP", - Port: 9000, - TargetPort: intstr.FromInt32(9000), - }, - }, - }, - }, metav1.CreateOptions{}) - - return -} - -func (r *objectStorageObj) WaitForReady() (err error) { - for { - if r.pod != nil && len(r.pod.Status.ContainerStatuses) > 0 && r.pod.Status.ContainerStatuses[0].Ready { - return nil - } - time.Sleep(500 * time.Millisecond) - pods, err := r.clientSet.CoreV1().Pods(r.namespace).List(context.Background(), metav1.ListOptions{ - LabelSelector: "testkube.io/devbox=storage", - }) - if err != nil { - return err - } - if len(pods.Items) == 0 { - return errors.New("pod not found") - } - r.pod = &pods.Items[0] - } -} - -func (r *objectStorageObj) IP() string { - if r.pod == nil { - return "" - } - return r.pod.Status.PodIP -} - -func (r *objectStorageObj) ClusterAddress() string { - ip := r.IP() - if ip == "" { - return "" - } - return fmt.Sprintf("%s:%d", ip, 9000) -} - -func (r *objectStorageObj) LocalAddress() string { - if r.localPort == 0 { - return "" - } - return fmt.Sprintf("0.0.0.0:%d", r.localPort) -} - -func (r *objectStorageObj) LocalWebAddress() string { - if r.localWebPort == 0 { - return "" - } - return fmt.Sprintf("127.0.0.1:%d", r.localWebPort) -} - -func (r *objectStorageObj) Forward() error { - if r.pod == nil { - return errors.New("pod not found") - } - if r.localPort != 0 { - return nil - } - port, err := GetFreePort() - if r.localWebPort != 0 { - return nil - } - webPort, err := GetFreePort() - if err != nil { - return err - } - err = ForwardPodPort(r.kubernetesConfig, r.pod.Namespace, r.pod.Name, 9000, port) - if err != nil { - return err - } - r.localPort = port - err = ForwardPodPort(r.kubernetesConfig, r.pod.Namespace, r.pod.Name, 9090, webPort) - if err != nil { - return err - } - r.localWebPort = webPort - return nil -} - -func (r *objectStorageObj) Connect() (*minio.Client, error) { - minioClient := minio.NewClient( - r.LocalAddress(), - "minioadmin", - 
"minioadmin", - "", - "", - "devbox", - ) - err := minioClient.Connect() - return minioClient, err -} - -func (r *objectStorageObj) Debug() { - PrintHeader("Object Storage") - if r.ClusterAddress() != "" { - PrintItem("Cluster Address", r.ClusterAddress(), "") - } else { - PrintItem("Cluster Address", "unknown", "") - } - if r.LocalAddress() != "" { - PrintItem("Local Address", r.LocalAddress(), "") - } else { - PrintItem("Local Address", "not forwarded", "") - } - if r.LocalWebAddress() != "" { - PrintItem("Console", "http://"+r.LocalWebAddress(), "minioadmin / minioadmin") - } else { - PrintItem("Console", "not forwarded", "") - } -} diff --git a/cmd/tcl/kubectl-testkube/devbox/print.go b/cmd/tcl/kubectl-testkube/devbox/print.go deleted file mode 100644 index 4070480e30..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/print.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2024 Testkube. -// -// Licensed as a Testkube Pro file under the Testkube Community -// License (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt - -package devbox - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/gookit/color" - "github.com/pterm/pterm" -) - -const ( - printItemNameLen = 20 -) - -var ( - DefaultSpinner = buildDefaultSpinner() -) - -func buildDefaultSpinner() pterm.SpinnerPrinter { - spinner := *pterm.DefaultSpinner.WithSequence(" ◐ ", " ◓ ", " ◑ ", " ◒ ") - spinner.SuccessPrinter = &pterm.PrefixPrinter{ - MessageStyle: &pterm.ThemeDefault.SuccessMessageStyle, - Prefix: pterm.Prefix{ - Style: &pterm.ThemeDefault.SuccessPrefixStyle, - Text: "✓", - }, - } - spinner.FailPrinter = &pterm.PrefixPrinter{ - MessageStyle: &pterm.ThemeDefault.ErrorMessageStyle, - Prefix: pterm.Prefix{ - Style: &pterm.ThemeDefault.ErrorPrefixStyle, - Text: "×", - }, - } - return spinner -} - -func PrintHeader(content string) { - fmt.Println("\n" + color.Blue.Render(color.Bold.Render(content))) -} - -func PrintActionHeader(content string) { - fmt.Println("\n" + color.Magenta.Render(color.Bold.Render(content))) -} - -func PrintItem(name, value, hint string) { - whitespace := strings.Repeat(" ", printItemNameLen-len(name)) - if hint != "" { - fmt.Printf("%s%s %s %s\n", whitespace, color.Bold.Render(name+":"), value, color.FgDarkGray.Render("("+hint+")")) - } else { - fmt.Printf("%s%s %s\n", whitespace, color.Bold.Render(name+":"), value) - } -} - -func PrintSpinner(nameOrLabel ...string) func(name string, err ...error) { - multi := pterm.DefaultMultiPrinter.WithUpdateDelay(10 * time.Millisecond) - messages := make(map[string]string, len(nameOrLabel)/2) - spinners := make(map[string]*pterm.SpinnerPrinter, len(nameOrLabel)/2) - - for i := 0; i < len(nameOrLabel); i += 2 { - name := nameOrLabel[i] - messages[name] = nameOrLabel[i+1] - spinners[name], _ = DefaultSpinner.WithWriter(multi.NewWriter()).Start(messages[name]) - } - - multi.Start() - - return func(name string, errs ...error) { - if 
spinners[name] == nil || !spinners[name].IsActive { - return - } - err := errors.Join(errs...) - if err == nil { - spinners[name].Success() - } else { - spinners[name].Fail(fmt.Sprintf("%s: %s", messages[name], err.Error())) - } - time.Sleep(10 * time.Millisecond) - for _, spinner := range spinners { - if spinner.IsActive { - return - } - } - multi.Stop() - } -} diff --git a/cmd/testworkflow-toolkit/env/client.go b/cmd/testworkflow-toolkit/env/client.go index 0f1b244af9..1031cb444c 100644 --- a/cmd/testworkflow-toolkit/env/client.go +++ b/cmd/testworkflow-toolkit/env/client.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "math" + "net/url" + "strconv" corev1 "k8s.io/api/core/v1" @@ -68,12 +70,20 @@ func ImageInspector() imageinspector.Inspector { } func Testkube() client.Client { + uri, err := url.Parse(config2.Config().Worker.Connection.LocalApiUrl) + host := config.APIServerName + port := config.APIServerPort + if err == nil { + host = uri.Hostname() + portStr, _ := strconv.ParseInt(uri.Port(), 10, 32) + port = int(portStr) + } if config2.UseProxy() { - return client.NewProxyAPIClient(Kubernetes(), client.NewAPIConfig(config2.Namespace(), config.APIServerName, config.APIServerPort)) + return client.NewProxyAPIClient(Kubernetes(), client.NewAPIConfig(config2.Namespace(), host, port)) } httpClient := phttp.NewClient(true) sseClient := phttp.NewSSEClient(true) - return client.NewDirectAPIClient(httpClient, sseClient, fmt.Sprintf("http://%s:%d", config.APIServerName, config.APIServerPort), "") + return client.NewDirectAPIClient(httpClient, sseClient, fmt.Sprintf("http://%s:%d", host, port), "") } func Cloud(ctx context.Context) (cloudexecutor.Executor, cloud.TestKubeCloudAPIClient) { From a535434a61a751159b6bf25bd52d99d6981dcdfe Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Fri, 25 Oct 2024 15:54:07 +0200 Subject: [PATCH 03/28] fix: GZip the binaries before sending --- cmd/tcl/kubectl-testkube/devbox/command.go | 42 +++---------------- 
.../devbox/devutils/objectstorage.go | 39 ++++++++++++++--- 2 files changed, 39 insertions(+), 42 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index e921b01a14..bb98585158 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -231,12 +231,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Println("Uploading binaries...") g.Go(func() error { its := time.Now() - file, err := os.Open(agentBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/testkube-api-server", file, agentBin.Hash()) + err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { fmt.Printf("Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -246,12 +241,7 @@ func NewDevBoxCommand() *cobra.Command { }) g.Go(func() error { its := time.Now() - file, err := os.Open(toolkitBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/toolkit", file, toolkitBin.Hash()) + err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { fmt.Printf("Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -261,12 +251,7 @@ func NewDevBoxCommand() *cobra.Command { }) g.Go(func() error { its := time.Now() - file, err := os.Open(initProcessBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/init", file, initProcessBin.Hash()) + err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { fmt.Printf("Init Process: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -410,12 +395,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf("Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - file, err := os.Open(agentBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/testkube-api-server", file, agentBin.Hash()) + err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { fmt.Printf("Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -453,12 +433,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf("Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - file, err := os.Open(toolkitBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/toolkit", file, toolkitBin.Hash()) + err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { fmt.Printf("Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -476,12 +451,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf("Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - file, err := os.Open(initProcessBin.Path()) - if err != nil { - return err - } - defer file.Close() - err = objectStorage.Upload(ctx, "bin/init", file, initProcessBin.Hash()) + err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { fmt.Printf("Init Process: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go index 77a1f62354..41d61bfd12 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go @@ -15,6 +15,8 @@ import ( "fmt" "io" "net/http" + "os" + "path/filepath" "sync" "time" @@ -23,6 +25,7 @@ import ( minio2 "github.com/minio/minio-go/v7" + "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" "github.com/kubeshop/testkube/internal/common" "github.com/kubeshop/testkube/pkg/log" "github.com/kubeshop/testkube/pkg/storage/minio" @@ -154,7 +157,7 @@ func (r *ObjectStorage) WaitForReady(ctx context.Context) error { } // TODO: Compress on-fly -func (r *ObjectStorage) Upload(ctx context.Context, path string, reader io.Reader, hash string) error { +func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) error { c, err := r.Client() if err != nil { return err @@ -162,21 +165,45 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, reader io.Reade if hash != "" && r.Is(path, hash) { return nil } - putUrl, err := c.PresignedPutObject(ctx, "devbox", path, 15*time.Minute) + //putUrl, err := c.PresignedPutObject(ctx, "devbox", path, 15*time.Minute) + putUrl, err := c.PresignHeader(ctx, "PUT", "devbox", path, 15*time.Minute, nil, http.Header{ + "X-Amz-Meta-Snowball-Auto-Extract": {"true"}, + "X-Amz-Meta-Minio-Snowball-Prefix": {filepath.Dir(path)}, + "Content-Type": {"application/gzip"}, + "Content-Encoding": {"gzip"}, + }) + if err != nil { + return err + } + + file, err := os.Open(fsPath) if err != nil { return err } + defer file.Close() + stat, err := file.Stat() + if err != nil { + return err + } + buf := new(bytes.Buffer) - //g := gzip.NewWriter(buf) - io.Copy(buf, reader) + tarStream := artifacts.NewTarStream() + go func() { + 
tarStream.Add(filepath.Base(path), file, stat) + tarStream.Close() + }() + io.Copy(buf, tarStream) + req, err := http.NewRequestWithContext(ctx, http.MethodPut, putUrl.String(), buf) if err != nil { return err } req.ContentLength = int64(buf.Len()) + req.Header.Set("X-Amz-Meta-Snowball-Auto-Extract", "true") + req.Header.Set("X-Amz-Meta-Minio-Snowball-Prefix", filepath.Dir(path)) + req.Header.Set("Content-Type", "application/gzip") + req.Header.Set("Content-Encoding", "gzip") - req.Header.Set("Content-Type", "application/octet-stream") - //req.Header.Set("Content-Encoding", "gzip") tr := http.DefaultTransport.(*http.Transport).Clone() tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} client := &http.Client{Transport: tr} From 3065b6ee90b8357d2adf4f996a6a4ff88063aa02 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 10:36:54 +0100 Subject: [PATCH 04/28] fix: small issues with devbox, add dashboard link, add README --- cmd/tcl/devbox-mutating-webhook/main.go | 6 +- cmd/tcl/kubectl-testkube/devbox/README.md | 67 +++++++++++++ cmd/tcl/kubectl-testkube/devbox/command.go | 94 ++++++++++++------- .../kubectl-testkube/devbox/devutils/agent.go | 3 +- .../kubectl-testkube/devbox/devutils/cloud.go | 21 ++++- .../devbox/devutils/crdsync.go | 3 + go.mod | 1 + go.sum | 2 + pkg/cloud/client/environments.go | 1 + 9 files changed, 160 insertions(+), 38 deletions(-) create mode 100644 cmd/tcl/kubectl-testkube/devbox/README.md diff --git a/cmd/tcl/devbox-mutating-webhook/main.go b/cmd/tcl/devbox-mutating-webhook/main.go index e5dbf1617e..32592ae482 100644 --- a/cmd/tcl/devbox-mutating-webhook/main.go +++ b/cmd/tcl/devbox-mutating-webhook/main.go @@ -89,7 +89,7 @@ func main() { script := ` set -e /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init + /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init || exit 1 chmod 777 
/.tk-devbox/init chmod +x /.tk-devbox/init ls -lah /.tk-devbox` @@ -97,8 +97,8 @@ func main() { script = ` set -e /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init - /usr/bin/mc cp --disable-multipart minio/devbox/bin/toolkit /.tk-devbox/toolkit + /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init || exit 1 + /usr/bin/mc cp --disable-multipart minio/devbox/bin/toolkit /.tk-devbox/toolkit || exit 1 chmod 777 /.tk-devbox/init chmod 777 /.tk-devbox/toolkit chmod +x /.tk-devbox/init diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md new file mode 100644 index 0000000000..accaa64b8c --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/README.md @@ -0,0 +1,67 @@ +# Development Box - TCL Licensed + +This utility is used to help with development of the Agent features (like Test Workflows). + +## How it works + +* It takes current Testkube CLI credentials and create development environment inside +* It deploys the Agent into the current cluster + * Test Triggers are disabled + * Webhooks are disabled + * Legacy Tests and Test Suites are disabled +* For live changes, it deploys Interceptor and Object Storage into the current cluster + * Object Storage stores latest binaries for the Agent, Toolkit and Init Process + * Interceptor loads the Toolkit and Init Process from the Object Storage into every Test Workflow Execution pod + +## Usage + +* Login to Testkube CLI, like `testkube login` +* Run `go run cmd/kubectl-testkube/main.go devbox` + * It's worth to create alias for that in own `.bashrc` or `.bash_profile` + * It's worth to pass a devbox name, like `-n dawid`, so it's not using random name + +The CLI will print a dashboard link for the selected environment. + +## Why? 
+ +It's a fast way to get live changes during the development: +* initial deployment takes up to 60 seconds +* continuous deployments take 1-10 seconds (depending on changes and network bandwidth) +* the Execution performance is not much worse (it's just running single container before, that is only fetching up to 100MB from local Object Storage) + +## Parameters + +Most important parameters are `-n, --name` for devbox static name, +and `-s, --sync` for synchronising Test Workflow and Test Workflow Template CRDs from the file system. + +```shell +Usage: + testkube devbox [flags] + +Aliases: + devbox, dev + +Flags: + --agent-image string base agent image (default "kubeshop/testkube-api-server:latest") + --init-image string base init image (default "kubeshop/testkube-tw-init:latest") + -n, --name string devbox name (default "1730107481990508000") + -s, --sync strings synchronise resources at paths + --toolkit-image string base toolkit image (default "kubeshop/testkube-tw-toolkit:latest") + -y, --yes auto accept without asking for confirmation +``` + +## Example + +```shell +# Initialize alias +tk() { + cd ~/projects/testkube + go run cmd/kubectl-testkube/main.go $@ +} + +# Select the proper cluster to deploy the devbox +kubectx cloud-dev + +# Run development box, synchronising all the Test Workflows from 'test' directory in Testkube repository +tk devbox -n dawid -s test +``` diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index bb98585158..ca40c6a0e1 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -29,6 +29,8 @@ import ( "github.com/kubeshop/testkube/pkg/cloud/client" "github.com/kubeshop/testkube/pkg/mapper/testworkflows" "github.com/kubeshop/testkube/pkg/ui" + + "github.com/savioxavier/termlink" ) const ( @@ -303,6 +305,9 @@ func NewDevBoxCommand() *cobra.Command { break } file, err := yamlWatcher.Next(ctx) + if !strings.HasSuffix(file, ".yml") && 
!strings.HasSuffix(file, ".yaml") { + continue + } if err == nil { _ = sync.Load(file) } @@ -311,15 +316,8 @@ func NewDevBoxCommand() *cobra.Command { // Propagate changes from CRDSync to Cloud go func() { - parallel := make(chan struct{}, 30) - for { - if ctx.Err() != nil { - break - } - update, err := sync.Next(ctx) - if err != nil { - continue - } + parallel := make(chan struct{}, 10) + process := func(update *devutils.CRDSyncUpdate) { parallel <- struct{}{} switch update.Op { case devutils.CRDSyncUpdateOpCreate: @@ -331,13 +329,17 @@ func NewDevBoxCommand() *cobra.Command { update.Template.Spec.Events = nil // ignore Cronjobs _, err := client.CreateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) if err != nil { - fmt.Printf("Failed to create Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: creating template: %s: error: %s\n", update.Template.Name, err.Error()) + } else { + fmt.Println("CRD Sync: created template:", update.Template.Name) } } else { update.Workflow.Spec.Events = nil // ignore Cronjobs _, err := client.CreateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) if err != nil { - fmt.Printf("Failed to create Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: creating workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + } else { + fmt.Println("CRD Sync: created workflow:", update.Workflow.Name) } } case devutils.CRDSyncUpdateOpUpdate: @@ -349,13 +351,17 @@ func NewDevBoxCommand() *cobra.Command { update.Template.Spec.Events = nil // ignore Cronjobs _, err := client.UpdateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) if err != nil { - fmt.Printf("Failed to update Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: updating template: %s: error: %s\n", update.Template.Name, err.Error()) + } else { + fmt.Println("CRD Sync: updated template:", 
update.Template.Name) } } else { update.Workflow.Spec.Events = nil _, err := client.UpdateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) if err != nil { - fmt.Printf("Failed to update Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: updating workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + } else { + fmt.Println("CRD Sync: updated workflow:", update.Workflow.Name) } } case devutils.CRDSyncUpdateOpDelete: @@ -366,17 +372,31 @@ func NewDevBoxCommand() *cobra.Command { if update.Template != nil { err := client.DeleteTestWorkflowTemplate(update.Template.Name) if err != nil { - fmt.Printf("Failed to delete Test Workflow Template: %s: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: deleting template: %s: error: %s\n", update.Template.Name, err.Error()) + } else { + fmt.Println("CRD Sync: deleted template:", update.Template.Name) } } else { err := client.DeleteTestWorkflow(update.Workflow.Name) if err != nil { - fmt.Printf("Failed to delete Test Workflow: %s: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: deleting workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + } else { + fmt.Println("CRD Sync: deleted workflow:", update.Workflow.Name) } } } <-parallel } + for { + if ctx.Err() != nil { + break + } + update, err := sync.Next(ctx) + if err != nil { + continue + } + go process(update) + } }() } @@ -384,41 +404,42 @@ func NewDevBoxCommand() *cobra.Command { rebuild := func(ctx context.Context) { g, _ := errgroup.WithContext(ctx) - fmt.Println("Rebuilding binaries...") + ts := time.Now() + fmt.Println("Rebuilding applications...") g.Go(func() error { its := time.Now() _, err := agentBin.Build(ctx) if err != nil { - fmt.Printf("Agent: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Agent: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { - fmt.Printf("Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - // TODO: Restart only if it has changes + // Restart only if it has changes - TODO: do in a nicer way if time.Since(its).Truncate(time.Millisecond).String() != "0s" { err := agentPod.Restart(ctx) if err == nil { - fmt.Printf("Agent: restarted. Waiting for readiness...\n") + fmt.Printf(" Agent: restarted. Waiting for readiness...\n") _ = agentPod.RefreshData(ctx) err = agentPod.WaitForReady(ctx) if ctx.Err() != nil { return nil } if err == nil { - fmt.Printf("Agent: ready again\n") + fmt.Printf(" Agent: ready again\n") } else { fail(errors.Wrap(err, "failed to wait for agent pod readiness")) } } else { - fmt.Printf("Agent: restart failed: %s\n", err.Error()) + fmt.Printf(" Agent: restart failed: %s\n", err.Error()) } } return nil @@ -427,39 +448,48 @@ func NewDevBoxCommand() *cobra.Command { its := time.Now() _, err := toolkitBin.Build(ctx) if err != nil { - fmt.Printf("Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Toolkit: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { - fmt.Printf("Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) return nil }) g.Go(func() error { its := time.Now() _, err := initProcessBin.Build(ctx) if err != nil { - fmt.Printf("Init Process: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Init Process: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { - fmt.Printf("Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf(" Init Process: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf("Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) return nil }) err = g.Wait() + if ctx.Err() != nil { + fmt.Println("Applications synchronised in", time.Since(ts)) + } + } + + if termlink.SupportsHyperlinks() { + fmt.Println("Dashboard:", termlink.Link(cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"), cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"))) + } else { + fmt.Println("Dashboard:", cloud.DashboardUrl(env.Slug, "dashboard/test-workflows")) } rebuildCtx, rebuildCtxCancel := context.WithCancel(ctx) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go index 22c71f9f97..0ea4da3ff0 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go @@ -57,9 +57,8 @@ func (r *Agent) Create(ctx context.Context, env *client.Environment) error { ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{"/bin/sh", "-c"}, Args: []string{` - set -e /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/testkube-api-server /.tk-devbox/testkube-api-server + /usr/bin/mc cp --disable-multipart minio/devbox/bin/testkube-api-server /.tk-devbox/testkube-api-server || exit 1 chmod 777 /.tk-devbox/testkube-api-server chmod +x /.tk-devbox/testkube-api-server ls -lah /.tk-devbox`}, diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go index 296d8183a9..98c5547815 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go @@ -103,6 +103,7 @@ func (c *cloudObj) Client(environmentId string) (client2.Client, error) { defer 
c.clientMu.Unlock() if c.client == nil || c.clientTs.Add(5*time.Minute).Before(time.Now()) { + fmt.Println("Creating new Cloud client") common2.GetClient(c.cmd) // refresh token var err error c.client, err = client2.GetClient(client2.ClientCloud, client2.Options{ @@ -154,7 +155,25 @@ func (c *cloudObj) CreateEnvironment(name string) (*client.Environment, error) { if err != nil { return nil, err } - c.list = append(c.list, env) + // TODO: POST request is not returning slug - if it will, delete the fallback path + if env.Slug != "" { + c.list = append(c.list, env) + } else { + err = c.UpdateList() + if err != nil { + return nil, err + } + for i := range c.list { + if c.list[i].Id == env.Id { + env = c.list[i] + break + } + } + } + // Hack to build proper URLs even when slug is missing + if env.Slug == "" { + env.Slug = env.Id + } return &env, nil } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go b/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go index dc47f89150..44c28a3db0 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/crdsync.go @@ -169,6 +169,8 @@ func (c *CRDSync) loadFile(path string) error { return nil } + defer c.emitter.Emit() + // Parse the YAML file file, err := os.Open(path) if err != nil { @@ -176,6 +178,7 @@ func (c *CRDSync) loadFile(path string) error { return nil } + // TODO: Handle deleted entries decoder := yaml.NewDecoder(file) for { var obj map[string]interface{} diff --git a/go.mod b/go.mod index af254a6743..ede10cac42 100644 --- a/go.mod +++ b/go.mod @@ -185,6 +185,7 @@ require ( github.com/rs/xid v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 // indirect + github.com/savioxavier/termlink v1.4.1 // indirect github.com/savsgio/gotils v0.0.0-20211223103454-d0aaa54c5899 // indirect github.com/segmentio/backo-go v1.0.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect diff --git 
a/go.sum b/go.sum index 6f853661bc..1db8936932 100644 --- a/go.sum +++ b/go.sum @@ -556,6 +556,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 h1:TToq11gyfNlrMFZiYujSekIsPd9AmsA2Bj/iv+s4JHE= github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0= +github.com/savioxavier/termlink v1.4.1 h1:pFcd+XH8iQjL+2mB4buCDUo+CMt5kKsr8jGG+VLfYAg= +github.com/savioxavier/termlink v1.4.1/go.mod h1:5T5ePUlWbxCHIwyF8/Ez1qufOoGM89RCg9NvG+3G3gc= github.com/savsgio/gotils v0.0.0-20211223103454-d0aaa54c5899 h1:Orn7s+r1raRTBKLSc9DmbktTT04sL+vkzsbRD2Q8rOI= github.com/savsgio/gotils v0.0.0-20211223103454-d0aaa54c5899/go.mod h1:oejLrk1Y/5zOF+c/aHtXqn3TFlzzbAgPWg8zBiAHDas= github.com/segmentio/analytics-go/v3 v3.2.1 h1:G+f90zxtc1p9G+WigVyTR0xNfOghOGs/PYAlljLOyeg= diff --git a/pkg/cloud/client/environments.go b/pkg/cloud/client/environments.go index e081eb98fb..924b347713 100644 --- a/pkg/cloud/client/environments.go +++ b/pkg/cloud/client/environments.go @@ -18,6 +18,7 @@ func NewEnvironmentsClient(baseUrl, token, orgID string) *EnvironmentsClient { type Environment struct { Name string `json:"name"` Id string `json:"id"` + Slug string `json:"slug"` Connected bool `json:"connected"` Owner string `json:"owner"` InstallCommand string `json:"installCommand,omitempty"` From 965b78446f217df33bf074ff9edfffd237614c27 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 10:49:50 +0100 Subject: [PATCH 05/28] feat: parallelize devbox better --- cmd/tcl/kubectl-testkube/devbox/command.go | 188 +++++++++++---------- 1 file changed, 99 insertions(+), 89 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index ca40c6a0e1..bee5415ff4 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ 
b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -19,6 +19,7 @@ import ( "syscall" "time" + "github.com/gookit/color" "github.com/pkg/errors" "github.com/pterm/pterm" "github.com/spf13/cobra" @@ -63,6 +64,8 @@ func NewDevBoxCommand() *cobra.Command { ctxCancel() }() + startTs := time.Now() + // Find repository root rootDir := devutils.FindDirContaining(InterceptorMainPath, AgentMainPath, ToolkitMainPath, InitProcessMainPath) if rootDir == "" { @@ -118,52 +121,6 @@ func NewDevBoxCommand() *cobra.Command { interceptor := devutils.NewInterceptor(interceptorPod, baseInitImage, baseToolkitImage, interceptorBin) agent := devutils.NewAgent(agentPod, cloud, baseAgentImage, baseInitImage, baseToolkitImage) objectStorage := devutils.NewObjectStorage(objectStoragePod) - - // Build initial binaries - g, _ := errgroup.WithContext(ctx) - fmt.Println("Building initial binaries...") - g.Go(func() error { - its := time.Now() - _, err := interceptorBin.Build(ctx) - if err != nil { - fmt.Printf("Interceptor: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) - } else { - fmt.Printf("Interceptor: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - } - return err - }) - g.Go(func() error { - its := time.Now() - _, err := agentBin.Build(ctx) - if err != nil { - fmt.Printf("Agent: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) - } else { - fmt.Printf("Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - } - return err - }) - g.Go(func() error { - its := time.Now() - _, err := toolkitBin.Build(ctx) - if err != nil { - fmt.Printf("Toolkit: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) - } else { - fmt.Printf("Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - } - return err - }) - g.Go(func() error { - its := time.Now() - _, err := initProcessBin.Build(ctx) - if err != nil { - fmt.Printf("Init Process: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) - } else { - fmt.Printf("Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - } - return err - }) - err = g.Wait() - var env *client.Environment // Cleanup @@ -208,76 +165,128 @@ func NewDevBoxCommand() *cobra.Command { fail(errors.Wrap(err, "failed to create namespace")) } + g, _ := errgroup.WithContext(ctx) + objectStorageReadiness := make(chan struct{}) + // Deploy object storage - fmt.Println("Creating object storage...") - if err = objectStorage.Create(ctx); err != nil { - fail(errors.Wrap(err, "failed to create object storage")) - } - fmt.Println("Waiting for object storage readiness...") - if err = objectStorage.WaitForReady(ctx); err != nil { - fail(errors.Wrap(err, "failed to wait for readiness")) - } + g.Go(func() error { + fmt.Println("[Object Storage] Creating...") + if err = objectStorage.Create(ctx); err != nil { + fail(errors.Wrap(err, "failed to create object storage")) + } + fmt.Println("[Object Storage] Waiting for readiness...") + if err = objectStorage.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to wait for readiness")) + } + fmt.Println("[Object Storage] Ready") + close(objectStorageReadiness) + return nil + }) // Deploying interceptor - fmt.Println("Deploying interceptor...") - if err = interceptor.Create(ctx); err != nil { - fail(errors.Wrap(err, "failed to create interceptor")) - } - fmt.Println("Waiting for interceptor readiness...") - if err = interceptor.WaitForReady(ctx); err != nil { - fail(errors.Wrap(err, "failed to create interceptor")) - } + g.Go(func() error { + 
fmt.Println("[Interceptor] Building...") + its := time.Now() + _, err := interceptorBin.Build(ctx) + if err != nil { + fmt.Printf("[Interceptor] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("[Interceptor] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + fmt.Println("[Interceptor] Deploying...") + if err = interceptor.Create(ctx); err != nil { + fail(errors.Wrap(err, "failed to create interceptor")) + } + fmt.Println("[Interceptor] Waiting for readiness...") + if err = interceptor.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to create interceptor")) + } + fmt.Println("[Interceptor] Enabling...") + if err = interceptor.Enable(ctx); err != nil { + fail(errors.Wrap(err, "failed to enable interceptor")) + } + fmt.Println("[Interceptor] Ready") + return nil + }) - // Uploading binaries - g, _ = errgroup.WithContext(ctx) - fmt.Println("Uploading binaries...") + // Deploying the Agent g.Go(func() error { + fmt.Println("[Agent] Building...") its := time.Now() + _, err := agentBin.Build(ctx) + if err != nil { + fmt.Printf("[Agent] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("[Agent] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + <-objectStorageReadiness + fmt.Println("[Agent] Uploading...") + its = time.Now() err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { - fmt.Printf("Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf("[Agent] Upload failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Agent] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + fmt.Println("[Agent] Deploying...") + if err = agent.Create(ctx, env); err != nil { + fail(errors.Wrap(err, "failed to create agent")) } - return err + fmt.Println("[Agent] Waiting for readiness...") + if err = agent.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to create agent")) + } + fmt.Println("[Agent] Ready...") + return nil }) + + // Building Toolkit g.Go(func() error { + fmt.Println("[Toolkit] Building...") its := time.Now() + _, err := toolkitBin.Build(ctx) + if err != nil { + fmt.Printf("[Toolkit] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("[Toolkit] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + <-objectStorageReadiness + fmt.Println("[Toolkit] Uploading...") + its = time.Now() err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { - fmt.Printf("Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf("[Toolkit] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Toolkit] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) } - return err + return nil }) + + // Building Init Process g.Go(func() error { + fmt.Println("[Init Process] Building...") its := time.Now() + _, err := initProcessBin.Build(ctx) + if err != nil { + fmt.Printf("[Init Process] Build failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("[Init Process] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + <-objectStorageReadiness + fmt.Println("[Init Process] Uploading...") + its = time.Now() err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { - fmt.Printf("Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + fmt.Printf("[Init Process] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Init Process] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) } - return err + return nil }) - err = g.Wait() - // Enabling Pod interceptor - fmt.Println("Enabling interceptor...") - if err = interceptor.Enable(ctx); err != nil { - fail(errors.Wrap(err, "failed to enable interceptor")) - } + g.Wait() - // Deploying agent - fmt.Println("Deploying agent...") - if err = agent.Create(ctx, env); err != nil { - fail(errors.Wrap(err, "failed to create agent")) - } - fmt.Println("Waiting for agent readiness...") - if err = agent.WaitForReady(ctx); err != nil { - fail(errors.Wrap(err, "failed to create agent")) - } + // Live synchronisation fmt.Println("Creating file system watcher...") goWatcher, err := devutils.NewFsWatcher(rootDir) if err != nil { @@ -486,6 +495,7 @@ func NewDevBoxCommand() *cobra.Command { } } + fmt.Printf(color.Green.Render("Development box is ready. 
Took %s\n"), time.Since(startTs)) if termlink.SupportsHyperlinks() { fmt.Println("Dashboard:", termlink.Link(cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"), cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"))) } else { From 8a905e56ac2d4f54215c5460b9e6821e699b13e6 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 10:55:34 +0100 Subject: [PATCH 06/28] chore: add links for CRD Sync workflows and templates --- cmd/tcl/kubectl-testkube/devbox/command.go | 38 +++++++++++++------ .../kubectl-testkube/devbox/devutils/cloud.go | 1 - 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index bee5415ff4..42e8d24bb9 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -323,6 +323,20 @@ func NewDevBoxCommand() *cobra.Command { } }() + workflowLabel := func(name string) string { + if !termlink.SupportsHyperlinks() { + return name + } + return name + " " + termlink.ColorLink("(open)", cloud.DashboardUrl(env.Slug, fmt.Sprintf("dashboard/test-workflows/%s", name)), "green") + } + + templateLabel := func(name string) string { + if !termlink.SupportsHyperlinks() { + return name + } + return termlink.Link(name, cloud.DashboardUrl(env.Slug, fmt.Sprintf("dashboard/test-workflow-templates/%s", name))) + } + // Propagate changes from CRDSync to Cloud go func() { parallel := make(chan struct{}, 10) @@ -338,17 +352,17 @@ func NewDevBoxCommand() *cobra.Command { update.Template.Spec.Events = nil // ignore Cronjobs _, err := client.CreateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) if err != nil { - fmt.Printf("CRD Sync: creating template: %s: error: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: creating template: %s: error: %s\n", templateLabel(update.Template.Name), err.Error()) } else { - fmt.Println("CRD Sync: created template:", update.Template.Name) + 
fmt.Println("CRD Sync: created template:", templateLabel(update.Template.Name)) } } else { update.Workflow.Spec.Events = nil // ignore Cronjobs _, err := client.CreateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) if err != nil { - fmt.Printf("CRD Sync: creating workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: creating workflow: %s: error: %s\n", workflowLabel(update.Workflow.Name), err.Error()) } else { - fmt.Println("CRD Sync: created workflow:", update.Workflow.Name) + fmt.Println("CRD Sync: created workflow:", workflowLabel(update.Workflow.Name)) } } case devutils.CRDSyncUpdateOpUpdate: @@ -360,17 +374,17 @@ func NewDevBoxCommand() *cobra.Command { update.Template.Spec.Events = nil // ignore Cronjobs _, err := client.UpdateTestWorkflowTemplate(*testworkflows.MapTemplateKubeToAPI(update.Template)) if err != nil { - fmt.Printf("CRD Sync: updating template: %s: error: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: updating template: %s: error: %s\n", templateLabel(update.Template.Name), err.Error()) } else { - fmt.Println("CRD Sync: updated template:", update.Template.Name) + fmt.Println("CRD Sync: updated template:", templateLabel(update.Template.Name)) } } else { update.Workflow.Spec.Events = nil _, err := client.UpdateTestWorkflow(*testworkflows.MapKubeToAPI(update.Workflow)) if err != nil { - fmt.Printf("CRD Sync: updating workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: updating workflow: %s: error: %s\n", workflowLabel(update.Workflow.Name), err.Error()) } else { - fmt.Println("CRD Sync: updated workflow:", update.Workflow.Name) + fmt.Println("CRD Sync: updated workflow:", workflowLabel(update.Workflow.Name)) } } case devutils.CRDSyncUpdateOpDelete: @@ -381,16 +395,16 @@ func NewDevBoxCommand() *cobra.Command { if update.Template != nil { err := client.DeleteTestWorkflowTemplate(update.Template.Name) if err != nil { - fmt.Printf("CRD Sync: deleting 
template: %s: error: %s\n", update.Template.Name, err.Error()) + fmt.Printf("CRD Sync: deleting template: %s: error: %s\n", templateLabel(update.Template.Name), err.Error()) } else { - fmt.Println("CRD Sync: deleted template:", update.Template.Name) + fmt.Println("CRD Sync: deleted template:", templateLabel(update.Template.Name)) } } else { err := client.DeleteTestWorkflow(update.Workflow.Name) if err != nil { - fmt.Printf("CRD Sync: deleting workflow: %s: error: %s\n", update.Workflow.Name, err.Error()) + fmt.Printf("CRD Sync: deleting workflow: %s: error: %s\n", workflowLabel(update.Workflow.Name), err.Error()) } else { - fmt.Println("CRD Sync: deleted workflow:", update.Workflow.Name) + fmt.Println("CRD Sync: deleted workflow:", workflowLabel(update.Workflow.Name)) } } } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go index 98c5547815..69b6f77a17 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/cloud.go @@ -103,7 +103,6 @@ func (c *cloudObj) Client(environmentId string) (client2.Client, error) { defer c.clientMu.Unlock() if c.client == nil || c.clientTs.Add(5*time.Minute).Before(time.Now()) { - fmt.Println("Creating new Cloud client") common2.GetClient(c.cmd) // refresh token var err error c.client, err = client2.GetClient(client2.ClientCloud, client2.Options{ From 1c446e4a880632133cefc674f9b1101e409d0696 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 11:14:50 +0100 Subject: [PATCH 07/28] chore: adjust devbox messages --- cmd/tcl/kubectl-testkube/devbox/README.md | 1 + cmd/tcl/kubectl-testkube/devbox/command.go | 40 ++++++++++++------- .../devbox/devutils/objectstorage.go | 22 +++++----- 3 files changed, 36 insertions(+), 27 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md index accaa64b8c..6064007d32 100644 --- a/cmd/tcl/kubectl-testkube/devbox/README.md 
+++ b/cmd/tcl/kubectl-testkube/devbox/README.md @@ -9,6 +9,7 @@ This utility is used to help with development of the Agent features (like Test W * Test Triggers are disabled * Webhooks are disabled * Legacy Tests and Test Suites are disabled + * It's not using Helm Chart, so default templates are not available * For live changes, it deploys Interceptor and Object Storage into the current cluster * Object Storage stores latest binaries for the Agent, Toolkit and Init Process * Interceptor loads the Toolkit and Init Process from the Object Storage into every Test Workflow Execution pod diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 42e8d24bb9..6cb7d69e76 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -222,7 +222,7 @@ func NewDevBoxCommand() *cobra.Command { <-objectStorageReadiness fmt.Println("[Agent] Uploading...") its = time.Now() - err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + _, err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { fmt.Printf("[Agent] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -253,7 +253,7 @@ func NewDevBoxCommand() *cobra.Command { <-objectStorageReadiness fmt.Println("[Toolkit] Uploading...") its = time.Now() - err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + _, err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { fmt.Printf("[Toolkit] Upload failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -275,7 +275,7 @@ func NewDevBoxCommand() *cobra.Command { <-objectStorageReadiness fmt.Println("[Init Process] Uploading...") its = time.Now() - err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + _, err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { fmt.Printf("[Init Process] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -327,7 +327,7 @@ func NewDevBoxCommand() *cobra.Command { if !termlink.SupportsHyperlinks() { return name } - return name + " " + termlink.ColorLink("(open)", cloud.DashboardUrl(env.Slug, fmt.Sprintf("dashboard/test-workflows/%s", name)), "green") + return name + " " + termlink.ColorLink("(open)", cloud.DashboardUrl(env.Slug, fmt.Sprintf("dashboard/test-workflows/%s", name)), "magenta") } templateLabel := func(name string) string { @@ -428,7 +428,7 @@ func NewDevBoxCommand() *cobra.Command { rebuild := func(ctx context.Context) { g, _ := errgroup.WithContext(ctx) ts := time.Now() - fmt.Println("Rebuilding applications...") + fmt.Println(color.Yellow.Render("Rebuilding applications...")) g.Go(func() error { its := time.Now() _, err := agentBin.Build(ctx) @@ -439,15 +439,17 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + cached, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { fmt.Printf(" Agent: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + if cached { + fmt.Printf(" Agent: no changes.\n") + } else { + fmt.Printf(" Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) - // Restart only if it has changes - TODO: do in a nicer way - if time.Since(its).Truncate(time.Millisecond).String() != "0s" { + // Restart only if it has changes err := agentPod.Restart(ctx) if err == nil { fmt.Printf(" Agent: restarted. Waiting for readiness...\n") @@ -477,12 +479,16 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + cached, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { fmt.Printf(" Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + if cached { + fmt.Printf(" Toolkit: no changes.\n") + } else { + fmt.Printf(" Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } return nil }) g.Go(func() error { @@ -495,17 +501,21 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) its = time.Now() - err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + cached, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { fmt.Printf(" Init Process: upload finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + if cached { + fmt.Printf(" Init Process: no changes.\n") + } else { + fmt.Printf(" Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } return nil }) err = g.Wait() - if ctx.Err() != nil { - fmt.Println("Applications synchronised in", time.Since(ts)) + if ctx.Err() == nil { + fmt.Println(color.Green.Render(fmt.Sprintf("Applications updated in %s", time.Since(ts)))) } } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go index 41d61bfd12..95c7c68798 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go @@ -156,16 +156,14 @@ func (r *ObjectStorage) WaitForReady(ctx context.Context) error { return r.pod.WaitForReady(ctx) } -// TODO: Compress on-fly -func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) error { +func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) (bool, error) { c, err := r.Client() if err != nil { - return err + return false, err } if hash != "" && r.Is(path, hash) { - return nil + return true, nil } - //putUrl, err := c.PresignedPutObject(ctx, "devbox", path, 15*time.Minute) putUrl, err := c.PresignHeader(ctx, "PUT", "devbox", path, 15*time.Minute, nil, http.Header{ "X-Amz-Meta-Snowball-Auto-Extract": {"true"}, "X-Amz-Meta-Minio-Snowball-Prefix": {filepath.Dir(path)}, @@ -173,17 +171,17 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, "Content-Encoding": {"gzip"}, }) if err != nil { - return err + return false, err } file, err := os.Open(fsPath) if err != nil { - return err + return false, err } defer file.Close() stat, err := file.Stat() if err != nil { - return err + 
return false, err } buf := new(bytes.Buffer) @@ -196,7 +194,7 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, req, err := http.NewRequestWithContext(ctx, http.MethodPut, putUrl.String(), buf) if err != nil { - return err + return false, err } req.ContentLength = int64(buf.Len()) req.Header.Set("X-Amz-Meta-Snowball-Auto-Extract", "true") @@ -209,12 +207,12 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, client := &http.Client{Transport: tr} res, err := client.Do(req) if err != nil { - return err + return false, err } if res.StatusCode != http.StatusOK { b, _ := io.ReadAll(res.Body) - return fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) + return false, fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) } r.SetHash(path, hash) - return nil + return false, nil } From 4fefdca6c9937105307704562c68c0617e573665 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 11:28:30 +0100 Subject: [PATCH 08/28] chore: reduce size of binaries for devbox --- cmd/tcl/kubectl-testkube/devbox/command.go | 6 +++--- .../kubectl-testkube/devbox/devutils/binary.go | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 6cb7d69e76..33d6793d9f 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -436,7 +436,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Agent: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Agent: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Agent: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) @@ -476,7 +476,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Toolkit: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Toolkit: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), toolkitBin.Size()) its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) @@ -498,7 +498,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Init Process: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Init Process: build finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Init Process: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), initProcessBin.Size()) its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go index cd49245b61..73327d15be 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go @@ -19,6 +19,8 @@ import ( "strings" "sync" + "github.com/dustin/go-humanize" + "github.com/kubeshop/testkube/pkg/tmp" ) @@ -69,6 +71,16 @@ func (b *Binary) Path() string { return b.outputPath } +func (b *Binary) Size() string { + b.buildMu.RLock() + defer b.buildMu.RUnlock() + stat, err := os.Stat(b.outputPath) + if err != nil { + return "" + } + return humanize.Bytes(uint64(stat.Size())) +} + func (b *Binary) Build(ctx context.Context) (string, error) { b.buildMu.Lock() defer b.buildMu.Unlock() @@ -83,6 +95,9 @@ func (b *Binary) Build(ctx context.Context) (string, error) { "-X github.com/kubeshop/testkube/pkg/telemetry.TestkubeMeasurementSecret=", "-X github.com/kubeshop/testkube/internal/pkg/api.Version=devbox", "-X github.com/kubeshop/testkube/internal/pkg/api.Commit=000000000", + "-s", + "-w", + "-v", }, " ")), "./main.go", ) From 3b1cc3d86ddac458a1af5808c9042179de0e5117 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 11:45:22 +0100 Subject: [PATCH 09/28] fix: reduce Init Process size from 35MB to 5MB --- cmd/testworkflow-init/commands/setup.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/testworkflow-init/commands/setup.go b/cmd/testworkflow-init/commands/setup.go index 4cd0b9ec1c..004b2eda41 100644 --- 
a/cmd/testworkflow-init/commands/setup.go +++ b/cmd/testworkflow-init/commands/setup.go @@ -9,10 +9,14 @@ import ( "github.com/kubeshop/testkube/cmd/testworkflow-init/data" "github.com/kubeshop/testkube/cmd/testworkflow-init/output" "github.com/kubeshop/testkube/pkg/testworkflows/testworkflowprocessor/action/actiontypes/lite" - constants2 "github.com/kubeshop/testkube/pkg/testworkflows/testworkflowprocessor/constants" "github.com/kubeshop/testkube/pkg/version" ) +// Moved from testworkflowprocessor/constants to reduce init process size +const ( + defaultInitImageBusyboxBinaryPath = "/.tktw-bin" +) + func Setup(config lite.ActionSetup) error { stdout := output.Std stdoutUnsafe := stdout.Direct() @@ -50,7 +54,7 @@ func Setup(config lite.ActionSetup) error { if config.CopyBinaries { // Use `cp` on the whole directory, as it has plenty of files, which lead to the same FS block. // Copying individual files will lead to high FS usage - err := exec.Command("cp", "-rf", constants2.DefaultInitImageBusyboxBinaryPath, data.InternalBinPath).Run() + err := exec.Command("cp", "-rf", defaultInitImageBusyboxBinaryPath, data.InternalBinPath).Run() if err != nil { stdoutUnsafe.Error(" error\n") stdoutUnsafe.Errorf(" failed to copy the binaries: %s\n", err.Error()) From bd2cf80e7f161a6820edb525705a6b9be3a441b2 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 11:46:22 +0100 Subject: [PATCH 10/28] chore: round time in devbox --- cmd/tcl/kubectl-testkube/devbox/command.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 33d6793d9f..590ce7d4ee 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -515,11 +515,11 @@ func NewDevBoxCommand() *cobra.Command { }) err = g.Wait() if ctx.Err() == nil { - fmt.Println(color.Green.Render(fmt.Sprintf("Applications updated in %s", time.Since(ts)))) + 
fmt.Println(color.Green.Render(fmt.Sprintf("Applications updated in %s", time.Since(ts).Truncate(time.Millisecond)))) } } - fmt.Printf(color.Green.Render("Development box is ready. Took %s\n"), time.Since(startTs)) + fmt.Printf(color.Green.Render("Development box is ready. Took %s\n"), time.Since(startTs).Truncate(time.Millisecond)) if termlink.SupportsHyperlinks() { fmt.Println("Dashboard:", termlink.Link(cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"), cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"))) } else { From 3a518663d17f132fe49020102189753182924857 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 12:06:57 +0100 Subject: [PATCH 11/28] fix: generate properly slug for devbox environment --- pkg/cloud/client/environments.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/cloud/client/environments.go b/pkg/cloud/client/environments.go index 924b347713..3191a3bc22 100644 --- a/pkg/cloud/client/environments.go +++ b/pkg/cloud/client/environments.go @@ -18,7 +18,7 @@ func NewEnvironmentsClient(baseUrl, token, orgID string) *EnvironmentsClient { type Environment struct { Name string `json:"name"` Id string `json:"id"` - Slug string `json:"slug"` + Slug string `json:"slug,omitempty"` Connected bool `json:"connected"` Owner string `json:"owner"` InstallCommand string `json:"installCommand,omitempty"` From 85e54795e727079e54437e204a39947b2d458733 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 12:17:14 +0100 Subject: [PATCH 12/28] fixup lint --- cmd/tcl/kubectl-testkube/devbox/command.go | 8 +++++--- cmd/tcl/kubectl-testkube/devbox/devutils/agent.go | 1 - cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go | 2 +- cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go | 5 ++++- cmd/testworkflow-init/main.go | 2 ++ 5 files changed, 12 insertions(+), 6 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 
590ce7d4ee..84277e804b 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -515,18 +515,18 @@ func NewDevBoxCommand() *cobra.Command { }) err = g.Wait() if ctx.Err() == nil { - fmt.Println(color.Green.Render(fmt.Sprintf("Applications updated in %s", time.Since(ts).Truncate(time.Millisecond)))) + color.Green.Println("Applications updated in", time.Since(ts).Truncate(time.Millisecond)) } } - fmt.Printf(color.Green.Render("Development box is ready. Took %s\n"), time.Since(startTs).Truncate(time.Millisecond)) + color.Green.Println("Development box is ready. Took", time.Since(startTs).Truncate(time.Millisecond)) if termlink.SupportsHyperlinks() { fmt.Println("Dashboard:", termlink.Link(cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"), cloud.DashboardUrl(env.Slug, "dashboard/test-workflows"))) } else { fmt.Println("Dashboard:", cloud.DashboardUrl(env.Slug, "dashboard/test-workflows")) } - rebuildCtx, rebuildCtxCancel := context.WithCancel(ctx) + _, rebuildCtxCancel := context.WithCancel(ctx) for { if ctx.Err() != nil { break @@ -541,10 +541,12 @@ func NewDevBoxCommand() *cobra.Command { } fmt.Printf("%s changed\n", relPath) rebuildCtxCancel() + var rebuildCtx context.Context rebuildCtx, rebuildCtxCancel = context.WithCancel(ctx) go rebuild(rebuildCtx) } } + rebuildCtxCancel() <-cleanupCh }, diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go index 0ea4da3ff0..15b74a8422 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go @@ -20,7 +20,6 @@ import ( type Agent struct { pod *PodObject - localPort int cloud *cloudObj agentImage string initProcessImage string diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go b/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go index 9f3a346920..499b0b2ee5 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go +++ 
b/cmd/tcl/kubectl-testkube/devbox/devutils/forwarding.go @@ -48,7 +48,7 @@ func ForwardPod(config *rest.Config, namespace, podName string, clusterPort, loc return err } path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName) - hostIP := strings.TrimLeft(config.Host, "https://") + hostIP := strings.TrimPrefix(strings.TrimPrefix(config.Host, "http://"), "https://") serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP} dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &serverURL) stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go b/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go index c478c663ad..e63ab1978b 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/interceptor.go @@ -123,6 +123,9 @@ func (r *Interceptor) Create(ctx context.Context) error { }, }, }) + if err != nil { + return err + } // Wait for the container to be started err = r.pod.WaitForContainerStarted(ctx) @@ -199,7 +202,7 @@ func (r *Interceptor) Create(ctx context.Context) error { defer bufMu.Unlock() buf, _ = io.ReadAll(reader) }() - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ Stdin: tarStream, Stdout: writer, Stderr: writer, diff --git a/cmd/testworkflow-init/main.go b/cmd/testworkflow-init/main.go index 74300096c1..c68a3b2429 100644 --- a/cmd/testworkflow-init/main.go +++ b/cmd/testworkflow-init/main.go @@ -3,6 +3,7 @@ package main import ( "encoding/json" "errors" + "fmt" "os" "os/signal" "slices" @@ -180,6 +181,7 @@ func main() { currentContainer = *action.Container case lite.ActionTypeCurrentStatus: + fmt.Println("jeeee") state.SetCurrentStatus(*action.CurrentStatus) case lite.ActionTypeStart: From f4f845333fc9a6f9423ff60c3f0415922ab53a49 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 
Oct 2024 12:22:55 +0100 Subject: [PATCH 13/28] chore: add option to open dashboard --- cmd/tcl/kubectl-testkube/devbox/README.md | 2 +- cmd/tcl/kubectl-testkube/devbox/command.go | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md index 6064007d32..7421fda743 100644 --- a/cmd/tcl/kubectl-testkube/devbox/README.md +++ b/cmd/tcl/kubectl-testkube/devbox/README.md @@ -48,7 +48,7 @@ Flags: -n, --name string devbox name (default "1730107481990508000") -s, --sync strings synchronise resources at paths --toolkit-image string base toolkit image (default "kubeshop/testkube-tw-toolkit:latest") - -y, --yes auto accept without asking for confirmation + -o, --open open dashboard in browser ``` ## Example diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 84277e804b..8728b9e30d 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -22,6 +22,8 @@ import ( "github.com/gookit/color" "github.com/pkg/errors" "github.com/pterm/pterm" + "github.com/savioxavier/termlink" + openurl "github.com/skratchdot/open-golang/open" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" @@ -30,8 +32,6 @@ import ( "github.com/kubeshop/testkube/pkg/cloud/client" "github.com/kubeshop/testkube/pkg/mapper/testworkflows" "github.com/kubeshop/testkube/pkg/ui" - - "github.com/savioxavier/termlink" ) const ( @@ -44,7 +44,7 @@ const ( func NewDevBoxCommand() *cobra.Command { var ( rawDevboxName string - autoAccept bool + open bool baseAgentImage string baseInitImage string baseToolkitImage string @@ -525,6 +525,9 @@ func NewDevBoxCommand() *cobra.Command { } else { fmt.Println("Dashboard:", cloud.DashboardUrl(env.Slug, "dashboard/test-workflows")) } + if open { + openurl.Run(cloud.DashboardUrl(env.Slug, "dashboard/test-workflows")) + } _, rebuildCtxCancel := context.WithCancel(ctx) for { @@ 
-554,10 +557,10 @@ func NewDevBoxCommand() *cobra.Command { cmd.Flags().StringVarP(&rawDevboxName, "name", "n", fmt.Sprintf("%d", time.Now().UnixNano()), "devbox name") cmd.Flags().StringSliceVarP(&syncResources, "sync", "s", nil, "synchronise resources at paths") + cmd.Flags().BoolVarP(&open, "open", "o", false, "open dashboard in browser") cmd.Flags().StringVar(&baseInitImage, "init-image", "kubeshop/testkube-tw-init:latest", "base init image") cmd.Flags().StringVar(&baseToolkitImage, "toolkit-image", "kubeshop/testkube-tw-toolkit:latest", "base toolkit image") cmd.Flags().StringVar(&baseAgentImage, "agent-image", "kubeshop/testkube-api-server:latest", "base agent image") - cmd.Flags().BoolVarP(&autoAccept, "yes", "y", false, "auto accept without asking for confirmation") return cmd } From f084f70d34d44f66d1344cb0ab8581a9aa4ab84d Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 12:25:30 +0100 Subject: [PATCH 14/28] fix: delete debug --- cmd/testworkflow-init/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/testworkflow-init/main.go b/cmd/testworkflow-init/main.go index c68a3b2429..74300096c1 100644 --- a/cmd/testworkflow-init/main.go +++ b/cmd/testworkflow-init/main.go @@ -3,7 +3,6 @@ package main import ( "encoding/json" "errors" - "fmt" "os" "os/signal" "slices" @@ -181,7 +180,6 @@ func main() { currentContainer = *action.Container case lite.ActionTypeCurrentStatus: - fmt.Println("jeeee") state.SetCurrentStatus(*action.CurrentStatus) case lite.ActionTypeStart: From a8791373e67ab6e31e1981cba5f86ad42d1e2dbd Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 12:29:23 +0100 Subject: [PATCH 15/28] chore: avoid logs for canceled operation --- cmd/tcl/kubectl-testkube/devbox/command.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 8728b9e30d..3113fd8bb9 100644 --- 
a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -432,6 +432,9 @@ func NewDevBoxCommand() *cobra.Command { g.Go(func() error { its := time.Now() _, err := agentBin.Build(ctx) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Agent: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -440,6 +443,9 @@ func NewDevBoxCommand() *cobra.Command { its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -451,6 +457,9 @@ func NewDevBoxCommand() *cobra.Command { // Restart only if it has changes err := agentPod.Restart(ctx) + if ctx.Err() != nil { + return nil + } if err == nil { fmt.Printf(" Agent: restarted. Waiting for readiness...\n") _ = agentPod.RefreshData(ctx) @@ -472,6 +481,9 @@ func NewDevBoxCommand() *cobra.Command { g.Go(func() error { its := time.Now() _, err := toolkitBin.Build(ctx) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -480,6 +492,9 @@ func NewDevBoxCommand() *cobra.Command { its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -494,6 +509,9 @@ func NewDevBoxCommand() *cobra.Command { g.Go(func() error { its := time.Now() _, err := initProcessBin.Build(ctx) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Init Process: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err @@ -502,6 +520,9 @@ func NewDevBoxCommand() *cobra.Command { its = time.Now() cached, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + if ctx.Err() != nil { + return nil + } if err != nil { fmt.Printf(" Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err From 90bf0a7e76758c8e185a2166ee34c3b9161c5528 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 13:18:12 +0100 Subject: [PATCH 16/28] chore: increase timeout for build bucket --- cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go index 95c7c68798..74d1d438f4 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go @@ -114,7 +114,7 @@ func (r *ObjectStorage) Create(ctx context.Context) error { // Handle a case when port forwarder is not ready for i := 0; i < 10; i++ { - makeBucketCtx, ctxCancel := context.WithTimeout(ctx, 2*time.Second) + makeBucketCtx, ctxCancel := context.WithTimeout(ctx, 5*time.Second) err = c.MakeBucket(makeBucketCtx, "devbox", minio2.MakeBucketOptions{}) if err == nil { ctxCancel() From c953c5306d30acbb80e495c91feb7f8e3026855d Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 15:49:25 +0100 Subject: [PATCH 17/28] fix: restarting pod --- cmd/tcl/kubectl-testkube/devbox/devutils/pods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go index 91af26eace..fdd2ce55c8 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go @@ -97,7 +97,7 @@ func (p 
*PodObject) create(ctx context.Context, request *corev1.Pod) error { GracePeriodSeconds: common.Ptr(int64(0)), PropagationPolicy: common.Ptr(metav1.DeletePropagationForeground), }) - if err != nil { + if err != nil && !errors.IsNotFound(err) { return errors2.Wrap(err, "failed to delete existing pod") } pod, err = p.clientSet.CoreV1().Pods(p.namespace).Create(context.Background(), request, metav1.CreateOptions{}) From 845fa8127e16ccf55b31ee3137607bf7d4c628b7 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Mon, 28 Oct 2024 15:50:37 +0100 Subject: [PATCH 18/28] fix: restarting pod --- cmd/tcl/kubectl-testkube/devbox/devutils/pods.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go index fdd2ce55c8..c923083d6c 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/pods.go @@ -208,7 +208,7 @@ func (p *PodObject) CreateService(ctx context.Context, ports ...corev1.ServicePo svc, err := p.clientSet.CoreV1().Services(p.namespace).Create(ctx, request, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { err = p.clientSet.CoreV1().Services(p.namespace).Delete(ctx, request.Name, metav1.DeleteOptions{}) - if err != nil { + if err != nil && !errors.IsNotFound(err) { return errors2.Wrap(err, "failed to delete existing service") } svc, err = p.clientSet.CoreV1().Services(p.namespace).Create(ctx, request, metav1.CreateOptions{}) From d06fc1be8bf7147d0cbe83037ecad53585f12fa8 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Tue, 29 Oct 2024 08:53:59 +0100 Subject: [PATCH 19/28] chore: clean messages and add transfer size --- cmd/tcl/kubectl-testkube/devbox/command.go | 59 ++++++++++--------- .../devbox/devutils/objectstorage.go | 23 ++++---- 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 
3113fd8bb9..06c1a3fa5b 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -19,6 +19,7 @@ import ( "syscall" "time" + "github.com/dustin/go-humanize" "github.com/gookit/color" "github.com/pkg/errors" "github.com/pterm/pterm" @@ -189,7 +190,7 @@ func NewDevBoxCommand() *cobra.Command { its := time.Now() _, err := interceptorBin.Build(ctx) if err != nil { - fmt.Printf("[Interceptor] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Interceptor] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { fmt.Printf("[Interceptor] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) } @@ -215,18 +216,18 @@ func NewDevBoxCommand() *cobra.Command { its := time.Now() _, err := agentBin.Build(ctx) if err != nil { - fmt.Printf("[Agent] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Agent] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Agent] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Agent] Built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) } <-objectStorageReadiness fmt.Println("[Agent] Uploading...") its = time.Now() - _, err = objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + _, size, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if err != nil { - fmt.Printf("[Agent] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Agent] Upload failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Agent] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Agent] Uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) } fmt.Println("[Agent] Deploying...") if err = agent.Create(ctx, env); err != nil { @@ -246,18 +247,18 @@ func NewDevBoxCommand() *cobra.Command { its := time.Now() _, err := toolkitBin.Build(ctx) if err != nil { - fmt.Printf("[Toolkit] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Toolkit] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { fmt.Printf("[Toolkit] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) } <-objectStorageReadiness fmt.Println("[Toolkit] Uploading...") its = time.Now() - _, err = objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + _, size, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if err != nil { - fmt.Printf("[Toolkit] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Toolkit] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Toolkit] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Toolkit] Uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) } return nil }) @@ -268,18 +269,18 @@ func NewDevBoxCommand() *cobra.Command { its := time.Now() _, err := initProcessBin.Build(ctx) if err != nil { - fmt.Printf("[Init Process] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Init Process] Build failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { fmt.Printf("[Init Process] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) } <-objectStorageReadiness fmt.Println("[Init Process] Uploading...") its = time.Now() - _, err = objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + _, size, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if err != nil { - fmt.Printf("[Init Process] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf("[Init Process] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Init Process] Uploaded in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Init Process] Uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) } return nil }) @@ -436,24 +437,24 @@ func NewDevBoxCommand() *cobra.Command { return nil } if err != nil { - fmt.Printf(" Agent: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Agent: build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Agent: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) + fmt.Printf(" Agent: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) its = time.Now() - cached, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + cached, size, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) if ctx.Err() != nil { return nil } if err != nil { - fmt.Printf(" Agent: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Agent: upload failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } if cached { fmt.Printf(" Agent: no changes.\n") } else { - fmt.Printf(" Agent: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Agent: uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) // Restart only if it has changes err := agentPod.Restart(ctx) @@ -485,24 +486,24 @@ func NewDevBoxCommand() *cobra.Command { return nil } if err != nil { - fmt.Printf(" Toolkit: build finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Toolkit: build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Toolkit: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), toolkitBin.Size()) + fmt.Printf(" Toolkit: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), toolkitBin.Size()) its = time.Now() - cached, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + cached, size, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) if ctx.Err() != nil { return nil } if err != nil { - fmt.Printf(" Toolkit: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Toolkit: upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } if cached { fmt.Printf(" Toolkit: no changes.\n") } else { - fmt.Printf(" Toolkit: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Toolkit: uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) } return nil }) @@ -513,24 +514,24 @@ func NewDevBoxCommand() *cobra.Command { return nil } if err != nil { - fmt.Printf(" Init Process: build finished in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Init Process: build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } - fmt.Printf(" Init Process: build finished in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), initProcessBin.Size()) + fmt.Printf(" Init Process: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), initProcessBin.Size()) its = time.Now() - cached, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + cached, size, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) if ctx.Err() != nil { return nil } if err != nil { - fmt.Printf(" Init Process: upload finished in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + color.Red.Printf(" Init Process: upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) return err } if cached { fmt.Printf(" Init Process: no changes.\n") } else { - fmt.Printf(" Init Process: upload finished in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf(" Init Process: uploaded %s in %s.\n", humanize.Bytes(uint64(size)), time.Since(its).Truncate(time.Millisecond)) } return nil }) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go index 74d1d438f4..95c2cafd01 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go @@ -156,13 +156,13 @@ func (r *ObjectStorage) WaitForReady(ctx context.Context) error { return r.pod.WaitForReady(ctx) } -func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) (bool, error) { +func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) (cached bool, transferred int, err error) { c, err := r.Client() if err != nil { - return false, 
err + return false, 0, err } if hash != "" && r.Is(path, hash) { - return true, nil + return true, 0, nil } putUrl, err := c.PresignHeader(ctx, "PUT", "devbox", path, 15*time.Minute, nil, http.Header{ "X-Amz-Meta-Snowball-Auto-Extract": {"true"}, @@ -171,17 +171,17 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, "Content-Encoding": {"gzip"}, }) if err != nil { - return false, err + return false, 0, err } file, err := os.Open(fsPath) if err != nil { - return false, err + return false, 0, err } defer file.Close() stat, err := file.Stat() if err != nil { - return false, err + return false, 0, err } buf := new(bytes.Buffer) @@ -191,12 +191,13 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, tarStream.Close() }() io.Copy(buf, tarStream) + bufLen := buf.Len() req, err := http.NewRequestWithContext(ctx, http.MethodPut, putUrl.String(), buf) if err != nil { - return false, err + return false, bufLen, err } - req.ContentLength = int64(buf.Len()) + req.ContentLength = int64(bufLen) req.Header.Set("X-Amz-Meta-Snowball-Auto-Extract", "true") req.Header.Set("X-Amz-Meta-Minio-Snowball-Prefix", filepath.Dir(path)) req.Header.Set("Content-Type", "application/gzip") @@ -207,12 +208,12 @@ func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, client := &http.Client{Transport: tr} res, err := client.Do(req) if err != nil { - return false, err + return false, bufLen, err } if res.StatusCode != http.StatusOK { b, _ := io.ReadAll(res.Body) - return false, fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) + return false, bufLen, fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) } r.SetHash(path, hash) - return false, nil + return false, bufLen, nil } From 9d91e7b3b15ee1f6b53693de117527331a327f13 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 09:58:22 +0100 Subject: [PATCH 20/28] feat: add basic 
binary storage to avoid transferring too much data in devbox --- cmd/tcl/devbox-binary-storage/main.go | 227 ++++++++++++++ cmd/tcl/devbox-mutating-webhook/main.go | 10 +- cmd/tcl/kubectl-testkube/devbox/README.md | 5 +- cmd/tcl/kubectl-testkube/devbox/command.go | 89 ++++-- .../kubectl-testkube/devbox/devutils/agent.go | 5 +- .../devbox/devutils/binary.go | 61 ++-- .../devbox/devutils/binarypatch.go | 254 +++++++++++++++ .../devbox/devutils/binarystorage.go | 296 ++++++++++++++++++ .../devbox/devutils/objectstorage.go | 219 ------------- 9 files changed, 881 insertions(+), 285 deletions(-) create mode 100644 cmd/tcl/devbox-binary-storage/main.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go create mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go delete mode 100644 cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go diff --git a/cmd/tcl/devbox-binary-storage/main.go b/cmd/tcl/devbox-binary-storage/main.go new file mode 100644 index 0000000000..b387472286 --- /dev/null +++ b/cmd/tcl/devbox-binary-storage/main.go @@ -0,0 +1,227 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package main + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/dustin/go-humanize" + + "github.com/kubeshop/testkube/cmd/tcl/kubectl-testkube/devbox/devutils" +) + +var ( + locks = make(map[string]*sync.RWMutex) + locksMu sync.Mutex + hashCache = make(map[string]string) +) + +func getLock(filePath string) *sync.RWMutex { + locksMu.Lock() + defer locksMu.Unlock() + if locks[filePath] == nil { + locks[filePath] = new(sync.RWMutex) + } + return locks[filePath] +} + +func rebuildHash(filePath string) { + hashCache[filePath] = "" + f, err := os.Open(filePath) + if err != nil { + return + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err == nil { + hashCache[filePath] = fmt.Sprintf("%x", h.Sum(nil)) + } +} + +func getHash(filePath string) string { + if hashCache[filePath] == "" { + rebuildHash(filePath) + } + return hashCache[filePath] +} + +func main() { + storagePath := "/storage" + http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + filePath := filepath.Clean(strings.TrimPrefix(r.URL.Path, "/")) + if filePath == "" { + w.WriteHeader(http.StatusNotFound) + return + } + localPath := filepath.Join(storagePath, filePath) + if r.Method == http.MethodGet { + getLock(filePath).RLock() + defer getLock(filePath).RUnlock() + + file, err := os.Open(localPath) + if err != nil { + w.WriteHeader(http.StatusNotFound) + return + } + stat, err := file.Stat() + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", fmt.Sprintf("%d", stat.Size())) + w.WriteHeader(http.StatusOK) + 
io.Copy(w, file) + return + } else if r.Method == http.MethodPost { + getLock(filePath).Lock() + defer getLock(filePath).Unlock() + + body, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading body", err) + return + } + if r.ContentLength != int64(len(body)) { + w.WriteHeader(http.StatusBadRequest) + return + } + if r.Header.Get("Content-Encoding") == "gzip" { + gz, err := gzip.NewReader(bytes.NewBuffer(body)) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading body into gzip", err) + return + } + body, err = io.ReadAll(gz) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading back data from gzip stream", err) + return + } + } + + err = os.WriteFile(localPath, body, 0666) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed to write file", err) + return + } + + h := sha256.New() + if _, err := io.Copy(h, bytes.NewBuffer(body)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed to build hash", err) + } + hashCache[filePath] = fmt.Sprintf("%x", h.Sum(nil)) + + fmt.Println("saved file", filePath, humanize.Bytes(uint64(len(body)))) + w.WriteHeader(http.StatusOK) + return + } else if r.Method == http.MethodPatch { + getLock(filePath).Lock() + defer getLock(filePath).Unlock() + + body, err := io.ReadAll(r.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading body", err) + return + } + if r.ContentLength != int64(len(body)) { + w.WriteHeader(http.StatusBadRequest) + return + } + if r.Header.Get("Content-Encoding") == "gzip" { + gz, err := gzip.NewReader(bytes.NewBuffer(body)) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading body into gzip", err) + return + } + body, err = io.ReadAll(gz) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + 
fmt.Println("failed reading back data from gzip stream", err) + return + } + } + + // Verify if patch can be applied + if r.Header.Get("X-Prev-Hash") != getHash(filePath) { + w.WriteHeader(http.StatusConflict) + return + } + + // Apply patch + prevFile, err := os.ReadFile(localPath) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed reading existing file", err) + return + } + patch := devutils.NewBinaryPatchFromBytes(body) + file := patch.Apply(prevFile) + + h := sha256.New() + if _, err := io.Copy(h, bytes.NewBuffer(file)); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed to build hash", err) + return + } + + // Validate hash + nextHash := fmt.Sprintf("%x", h.Sum(nil)) + if r.Header.Get("X-Hash") != nextHash { + w.WriteHeader(http.StatusBadRequest) + fmt.Println("after applying patch result has different hash than expected", err) + return + } + fmt.Println("Expected hash", r.Header.Get("X-Hash"), "got", nextHash) + err = os.WriteFile(localPath, file, 0666) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Println("failed to write file", err) + return + } + hashCache[filePath] = nextHash + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusMethodNotAllowed) + }) + + stopSignal := make(chan os.Signal, 1) + signal.Notify(stopSignal, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-stopSignal + os.Exit(0) + }() + + fmt.Println("Starting server...") + + panic(http.ListenAndServe(":8080", nil)) +} diff --git a/cmd/tcl/devbox-mutating-webhook/main.go b/cmd/tcl/devbox-mutating-webhook/main.go index 32592ae482..d406d3b835 100644 --- a/cmd/tcl/devbox-mutating-webhook/main.go +++ b/cmd/tcl/devbox-mutating-webhook/main.go @@ -88,17 +88,15 @@ func main() { script := ` set -e - /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init || exit 1 + /.tktw-bin/wget 
-O /.tk-devbox/init http://devbox-binary:8080/init || exit 1 chmod 777 /.tk-devbox/init chmod +x /.tk-devbox/init ls -lah /.tk-devbox` if usesToolkit { script = ` set -e - /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" "minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/init /.tk-devbox/init || exit 1 - /usr/bin/mc cp --disable-multipart minio/devbox/bin/toolkit /.tk-devbox/toolkit || exit 1 + /.tktw-bin/wget -O /.tk-devbox/init http://devbox-binary:8080/init || exit 1 + /.tktw-bin/wget -O /.tk-devbox/toolkit http://devbox-binary:8080/toolkit || exit 1 chmod 777 /.tk-devbox/init chmod 777 /.tk-devbox/toolkit chmod +x /.tk-devbox/init @@ -108,7 +106,7 @@ func main() { pod.Spec.InitContainers = append([]corev1.Container{{ Name: "devbox-init", - Image: "minio/mc:latest", + Image: initImage, ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{"/bin/sh", "-c"}, Args: []string{script}, diff --git a/cmd/tcl/kubectl-testkube/devbox/README.md b/cmd/tcl/kubectl-testkube/devbox/README.md index 7421fda743..b41c8818f8 100644 --- a/cmd/tcl/kubectl-testkube/devbox/README.md +++ b/cmd/tcl/kubectl-testkube/devbox/README.md @@ -10,8 +10,9 @@ This utility is used to help with development of the Agent features (like Test W * Webhooks are disabled * Legacy Tests and Test Suites are disabled * It's not using Helm Chart, so default templates are not available -* For live changes, it deploys Interceptor and Object Storage into the current cluster - * Object Storage stores latest binaries for the Agent, Toolkit and Init Process +* For live changes, it deploys Interceptor and Binary Storage into the current cluster + * Binary Storage stores latest binaries for the Agent, Toolkit and Init Process + * Binary Storage is optimized for patching binaries with incremental builds (to avoid sending the whole binary, when only small part is changed) * Interceptor loads the Toolkit and Init Process from the Object Storage into every Test Workflow 
Execution pod ## Usage diff --git a/cmd/tcl/kubectl-testkube/devbox/command.go b/cmd/tcl/kubectl-testkube/devbox/command.go index 06c1a3fa5b..80122c636b 100644 --- a/cmd/tcl/kubectl-testkube/devbox/command.go +++ b/cmd/tcl/kubectl-testkube/devbox/command.go @@ -36,10 +36,11 @@ import ( ) const ( - InterceptorMainPath = "cmd/tcl/devbox-mutating-webhook/main.go" - AgentMainPath = "cmd/api-server/main.go" - ToolkitMainPath = "cmd/testworkflow-toolkit/main.go" - InitProcessMainPath = "cmd/testworkflow-init/main.go" + InterceptorMainPath = "cmd/tcl/devbox-mutating-webhook/main.go" + BinaryStorageMainPath = "cmd/tcl/devbox-binary-storage/main.go" + AgentMainPath = "cmd/api-server/main.go" + ToolkitMainPath = "cmd/testworkflow-toolkit/main.go" + InitProcessMainPath = "cmd/testworkflow-init/main.go" ) func NewDevBoxCommand() *cobra.Command { @@ -108,20 +109,28 @@ func NewDevBoxCommand() *cobra.Command { // Initialize bare cluster resources namespace := cluster.Namespace(fmt.Sprintf("devbox-%s", rawDevboxName)) - objectStoragePod := namespace.Pod("devbox-storage") interceptorPod := namespace.Pod("devbox-interceptor") agentPod := namespace.Pod("devbox-agent") + binaryStoragePod := namespace.Pod("devbox-binary") // Initialize binaries interceptorBin := devutils.NewBinary(InterceptorMainPath, cluster.OperatingSystem(), cluster.Architecture()) + binaryStorageBin := devutils.NewBinary(BinaryStorageMainPath, cluster.OperatingSystem(), cluster.Architecture()) agentBin := devutils.NewBinary(AgentMainPath, cluster.OperatingSystem(), cluster.Architecture()) toolkitBin := devutils.NewBinary(ToolkitMainPath, cluster.OperatingSystem(), cluster.Architecture()) initProcessBin := devutils.NewBinary(InitProcessMainPath, cluster.OperatingSystem(), cluster.Architecture()) + // Initialize clean up + defer interceptorBin.Close() + defer binaryStorageBin.Close() + defer agentBin.Close() + defer toolkitBin.Close() + defer initProcessBin.Close() + // Initialize wrappers over cluster resources 
interceptor := devutils.NewInterceptor(interceptorPod, baseInitImage, baseToolkitImage, interceptorBin) agent := devutils.NewAgent(agentPod, cloud, baseAgentImage, baseInitImage, baseToolkitImage) - objectStorage := devutils.NewObjectStorage(objectStoragePod) + binaryStorage := devutils.NewBinaryStorage(binaryStoragePod, binaryStorageBin) var env *client.Environment // Cleanup @@ -130,6 +139,12 @@ func NewDevBoxCommand() *cobra.Command { cleanup := func() { cleanupMu.Lock() + interceptorBin.Close() + binaryStorageBin.Close() + agentBin.Close() + toolkitBin.Close() + initProcessBin.Close() + fmt.Println("Deleting namespace...") if err := namespace.Destroy(); err != nil { fmt.Println("Failed to destroy namespace:", err.Error()) @@ -167,22 +182,7 @@ func NewDevBoxCommand() *cobra.Command { } g, _ := errgroup.WithContext(ctx) - objectStorageReadiness := make(chan struct{}) - - // Deploy object storage - g.Go(func() error { - fmt.Println("[Object Storage] Creating...") - if err = objectStorage.Create(ctx); err != nil { - fail(errors.Wrap(err, "failed to create object storage")) - } - fmt.Println("[Object Storage] Waiting for readiness...") - if err = objectStorage.WaitForReady(ctx); err != nil { - fail(errors.Wrap(err, "failed to wait for readiness")) - } - fmt.Println("[Object Storage] Ready") - close(objectStorageReadiness) - return nil - }) + binaryStorageReadiness := make(chan struct{}) // Deploying interceptor g.Go(func() error { @@ -210,6 +210,29 @@ func NewDevBoxCommand() *cobra.Command { return nil }) + // Deploying binary storage + g.Go(func() error { + fmt.Println("[Binary Storage] Building...") + its := time.Now() + _, err := binaryStorageBin.Build(ctx) + if err != nil { + color.Red.Printf("[Binary Storage] Build failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) + } else { + fmt.Printf("[Binary Storage] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + } + fmt.Println("[Binary Storage] Deploying...") + if err = binaryStorage.Create(ctx); err != nil { + fail(errors.Wrap(err, "failed to create binary storage")) + } + fmt.Println("[Binary Storage] Waiting for readiness...") + if err = binaryStorage.WaitForReady(ctx); err != nil { + fail(errors.Wrap(err, "failed to create binary storage")) + } + fmt.Println("[Binary Storage] Ready") + close(binaryStorageReadiness) + return nil + }) + // Deploying the Agent g.Go(func() error { fmt.Println("[Agent] Building...") @@ -220,10 +243,10 @@ func NewDevBoxCommand() *cobra.Command { } else { fmt.Printf("[Agent] Built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) } - <-objectStorageReadiness + <-binaryStorageReadiness fmt.Println("[Agent] Uploading...") its = time.Now() - _, size, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + _, size, err := binaryStorage.Upload(ctx, "testkube-api-server", agentBin) if err != nil { color.Red.Printf("[Agent] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -249,12 +272,12 @@ func NewDevBoxCommand() *cobra.Command { if err != nil { color.Red.Printf("[Toolkit] Build failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Toolkit] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Toolkit] Built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), toolkitBin.Size()) } - <-objectStorageReadiness + <-binaryStorageReadiness fmt.Println("[Toolkit] Uploading...") its = time.Now() - _, size, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + _, size, err := binaryStorage.Upload(ctx, "toolkit", toolkitBin) if err != nil { color.Red.Printf("[Toolkit] Upload failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -271,12 +294,12 @@ func NewDevBoxCommand() *cobra.Command { if err != nil { color.Red.Printf("[Init Process] Build failed in %s. Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { - fmt.Printf("[Init Process] Built in %s.\n", time.Since(its).Truncate(time.Millisecond)) + fmt.Printf("[Init Process] Built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), initProcessBin.Size()) } - <-objectStorageReadiness + <-binaryStorageReadiness fmt.Println("[Init Process] Uploading...") its = time.Now() - _, size, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + _, size, err := binaryStorage.Upload(ctx, "init", initProcessBin) if err != nil { color.Red.Printf("[Init Process] Upload failed in %s. 
Error: %s\n", time.Since(its).Truncate(time.Millisecond), err) } else { @@ -443,7 +466,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Agent: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), agentBin.Size()) its = time.Now() - cached, size, err := objectStorage.Upload(ctx, "bin/testkube-api-server", agentBin.Path(), agentBin.Hash()) + cached, size, err := binaryStorage.Upload(ctx, "testkube-api-server", agentBin) if ctx.Err() != nil { return nil } @@ -492,7 +515,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Toolkit: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), toolkitBin.Size()) its = time.Now() - cached, size, err := objectStorage.Upload(ctx, "bin/toolkit", toolkitBin.Path(), toolkitBin.Hash()) + cached, size, err := binaryStorage.Upload(ctx, "toolkit", toolkitBin) if ctx.Err() != nil { return nil } @@ -520,7 +543,7 @@ func NewDevBoxCommand() *cobra.Command { fmt.Printf(" Init Process: built in %s (size: %s).\n", time.Since(its).Truncate(time.Millisecond), initProcessBin.Size()) its = time.Now() - cached, size, err := objectStorage.Upload(ctx, "bin/init", initProcessBin.Path(), initProcessBin.Hash()) + cached, size, err := binaryStorage.Upload(ctx, "init", initProcessBin) if ctx.Err() != nil { return nil } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go index 15b74a8422..384ace4d3f 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go @@ -52,12 +52,11 @@ func (r *Agent) Create(ctx context.Context, env *client.Environment) error { ServiceAccountName: "devbox-account", InitContainers: []corev1.Container{{ Name: "devbox-init", - Image: "minio/mc:latest", + Image: "busybox:1.36.1-musl", ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{"/bin/sh", "-c"}, Args: []string{` - /usr/bin/mc config host add minio "http://devbox-storage:9000" "minioadmin" 
"minioadmin" - /usr/bin/mc cp --disable-multipart minio/devbox/bin/testkube-api-server /.tk-devbox/testkube-api-server || exit 1 + /bin/wget -O /.tk-devbox/testkube-api-server http://devbox-binary:8080/testkube-api-server || exit 1 chmod 777 /.tk-devbox/testkube-api-server chmod +x /.tk-devbox/testkube-api-server ls -lah /.tk-devbox`}, diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go index 73327d15be..fa84239a85 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binary.go @@ -18,6 +18,7 @@ import ( "path/filepath" "strings" "sync" + "time" "github.com/dustin/go-humanize" @@ -25,21 +26,24 @@ import ( ) type Binary struct { - mainPath string - outputPath string - operatingSystem string - procArchitecture string - - hash string - buildMu sync.RWMutex + mainPath string + outputPath string + alternatingOutputPath string + operatingSystem string + procArchitecture string + + prevHash string + hash string + buildMu sync.RWMutex } func NewBinary(mainPath, operatingSystem, procArchitecture string) *Binary { return &Binary{ - mainPath: mainPath, - outputPath: tmp.Name(), - operatingSystem: operatingSystem, - procArchitecture: procArchitecture, + mainPath: mainPath, + outputPath: tmp.Name(), + alternatingOutputPath: tmp.Name(), + operatingSystem: operatingSystem, + procArchitecture: procArchitecture, } } @@ -55,6 +59,7 @@ func (b *Binary) updateHash() error { return fmt.Errorf("failed to get hash: %s", err.Error()) } + b.prevHash = b.hash b.hash = fmt.Sprintf("%x", h.Sum(nil)) return nil } @@ -81,13 +86,26 @@ func (b *Binary) Size() string { return humanize.Bytes(uint64(stat.Size())) } +func (b *Binary) patch() ([]byte, error) { + prevFile, prevErr := os.ReadFile(b.alternatingOutputPath) + if prevErr != nil { + return nil, prevErr + } + currentFile, currentErr := os.ReadFile(b.outputPath) + if currentErr != nil { + return nil, currentErr + } + // In 
1.5 second either it will optimize, or just pass it down + return NewBinaryPatchFor(prevFile, currentFile, 1500*time.Millisecond).Bytes(), nil +} + func (b *Binary) Build(ctx context.Context) (string, error) { b.buildMu.Lock() defer b.buildMu.Unlock() cmd := exec.Command( "go", "build", - "-o", b.outputPath, + "-o", b.alternatingOutputPath, fmt.Sprintf("-ldflags=%s", strings.Join([]string{ "-X github.com/kubeshop/testkube/internal/app/api/v1.SlackBotClientID=", "-X github.com/kubeshop/testkube/internal/app/api/v1.SlackBotClientSecret=", @@ -135,16 +153,10 @@ func (b *Binary) Build(ctx context.Context) (string, error) { return "", fmt.Errorf("failed to build: %s: %s", err.Error(), string(buf)) } - f, err := os.Open(b.outputPath) - if err != nil { - return "", fmt.Errorf("failed to get hash: reading binary: %s", err.Error()) - } - defer f.Close() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return "", fmt.Errorf("failed to get hash: %s", err.Error()) - } + // Switch paths + p := b.alternatingOutputPath + b.alternatingOutputPath = b.outputPath + b.outputPath = p err = b.updateHash() if err != nil { @@ -152,3 +164,8 @@ func (b *Binary) Build(ctx context.Context) (string, error) { } return b.hash, err } + +func (b *Binary) Close() { + os.Remove(b.outputPath) + os.Remove(b.alternatingOutputPath) +} diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go new file mode 100644 index 0000000000..8effd6b27d --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -0,0 +1,254 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "bytes" + "encoding/binary" + "time" +) + +const ( + BinaryPatchAddOp = 1 + BinaryPatchPreserveOp = 2 + BinaryPatchDeleteOp = 3 + + BinaryBatchBlockSize = 100 + BinaryBatchBlockFactor = 8 + BinaryBatchBlockFactorMax = 50 + BinaryPatchMinCommonSize = 8 +) + +// BinaryPatch is helper to avoid sending the whole binaries. +// It's optimized for fast analysis to send it ASAP, +// so the resulting patch may be bigger than it's needed. +// It's working nicely for incremental builds though. +type BinaryPatch struct { + buf *bytes.Buffer + lastOp int + lastCount int +} + +type BinaryPatchThreshold struct { + Duration time.Duration + Minimum float64 +} + +func NewBinaryPatch() *BinaryPatch { + return &BinaryPatch{ + buf: bytes.NewBuffer(nil), + } +} + +func NewBinaryPatchFromBytes(data []byte) *BinaryPatch { + return &BinaryPatch{ + buf: bytes.NewBuffer(data), + } +} + +func NewBinaryPatchFor(originalFile, currentFile []byte, maxDuration time.Duration) *BinaryPatch { + p := NewBinaryPatch() + p.Read(originalFile, currentFile, maxDuration) + return p +} + +func (p *BinaryPatch) Bytes() []byte { + return p.buf.Bytes() +} + +func (p *BinaryPatch) Load(data []byte) { + p.buf = bytes.NewBuffer(data) +} + +func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Duration) { + originalFileLen := len(originalFile) + currentFileLen := len(currentFile) + smallerFileLen := min(originalFileLen, currentFileLen) + + iteration := 0 + currentIndex := 0 + originalIndex := 0 + + // Omit same sequence + omit := 0 + for omit < smallerFileLen && currentFile[omit] == originalFile[omit] { + omit++ + } + originalIndex += omit + currentIndex += omit + p.Preserve(omit) + + replaced := 0 + ts := time.Now() + +loop: + for { + iteration++ + leftCurrent := currentFileLen - currentIndex + leftOriginal := originalFileLen - originalIndex + 
leftMin := min(leftCurrent, leftOriginal) + if leftMin <= BinaryPatchMinCommonSize { + break + } + segment := min(leftMin, BinaryBatchBlockSize*BinaryBatchBlockFactor) - BinaryPatchMinCommonSize + maxIterations := min(segment+replaced, leftMin) + + // Extract fast when duration passed + if maxDuration != 0 && iteration%1000 == 0 && maxDuration < time.Since(ts) { + p.Delete(originalFileLen - originalIndex) + p.Add(currentFile[currentIndex:]) + return + } + + // Try recovering from an endless loop + if maxIterations > BinaryBatchBlockSize*BinaryBatchBlockFactorMax { + p.Add(currentFile[currentIndex : currentIndex+maxIterations/2]) + replaced -= maxIterations / 2 + currentIndex += maxIterations / 2 + continue loop + } + + // Find next match when adding to original file + biggestCommon := 0 + biggestCommonAt := 0 + biggestCommonDel := false + for i := 0; i < maxIterations; i++ { + common := 0 + for i+common < leftMin && currentFile[currentIndex+i+common] == originalFile[originalIndex+common] { + common++ + } + if common > biggestCommon { + biggestCommon = common + biggestCommonAt = i + biggestCommonDel = false + } + common = 0 + for i+common < leftMin && currentFile[currentIndex+common] == originalFile[originalIndex+i+common] { + common++ + } + if common > biggestCommon { + biggestCommon = common + biggestCommonAt = i + biggestCommonDel = true + } + } + + if biggestCommon >= BinaryPatchMinCommonSize { + if biggestCommonDel { + p.Delete(biggestCommonAt) + p.Preserve(biggestCommon) + replaced += biggestCommonAt + originalIndex += biggestCommonAt + biggestCommon + currentIndex += biggestCommon + } else { + p.Add(currentFile[currentIndex : currentIndex+biggestCommonAt]) + p.Preserve(biggestCommon) + replaced -= biggestCommonAt + currentIndex += biggestCommonAt + biggestCommon + originalIndex += biggestCommon + } + continue loop + } + + // Treat some part as deleted to proceed + p.Delete(BinaryPatchMinCommonSize) + replaced += BinaryPatchMinCommonSize + originalIndex += 
BinaryPatchMinCommonSize + } + + if currentIndex != currentFileLen { + p.Add(currentFile[currentIndex:]) + } else if originalIndex != originalFileLen { + p.Preserve(originalFileLen - originalIndex) + } +} + +func (p *BinaryPatch) Len() int { + return p.buf.Len() +} + +func (p *BinaryPatch) Preserve(bytesCount int) { + if bytesCount == 0 { + return + } + if p.lastOp == BinaryPatchPreserveOp { + p.lastCount += bytesCount + b := p.buf.Bytes() + binary.LittleEndian.PutUint32(b[len(b)-4:], uint32(p.lastCount)) + return + } + p.lastOp = BinaryPatchPreserveOp + p.lastCount = bytesCount + p.buf.WriteByte(BinaryPatchPreserveOp) + num := make([]byte, 4) + binary.LittleEndian.PutUint32(num, uint32(bytesCount)) + p.buf.Write(num) +} + +func (p *BinaryPatch) Delete(bytesCount int) { + if bytesCount == 0 { + return + } + if p.lastOp == BinaryPatchDeleteOp { + p.lastCount += bytesCount + b := p.buf.Bytes() + binary.LittleEndian.PutUint32(b[len(b)-4:], uint32(p.lastCount)) + return + } + p.lastOp = BinaryPatchDeleteOp + p.lastCount = bytesCount + p.buf.WriteByte(BinaryPatchDeleteOp) + num := make([]byte, 4) + binary.LittleEndian.PutUint32(num, uint32(bytesCount)) + p.buf.Write(num) +} + +func (p *BinaryPatch) Add(bytesArr []byte) { + if len(bytesArr) == 0 { + return + } + if p.lastOp == BinaryPatchAddOp { + b := p.buf.Bytes() + nextCount := p.lastCount + len(bytesArr) + binary.LittleEndian.PutUint32(b[len(b)-p.lastCount-4:], uint32(nextCount)) + p.buf.Write(bytesArr) + p.lastCount = nextCount + return + } + p.lastOp = BinaryPatchAddOp + p.lastCount = len(bytesArr) + p.buf.WriteByte(BinaryPatchAddOp) + num := make([]byte, 4) + binary.LittleEndian.PutUint32(num, uint32(len(bytesArr))) + p.buf.Write(num) + p.buf.Write(bytesArr) +} + +func (p *BinaryPatch) Apply(original []byte) []byte { + result := make([]byte, 0) + patch := p.buf.Bytes() + for i := 0; i < len(patch); { + switch patch[i] { + case BinaryPatchPreserveOp: + count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + result 
= append(result, original[:count]...) + original = original[count:] + i += 5 + case BinaryPatchDeleteOp: + count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + original = original[count:] + i += 5 + case BinaryPatchAddOp: + count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + result = append(result, patch[i+5:i+5+int(count)]...) + i += 5 + int(count) + } + } + return result +} diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go new file mode 100644 index 0000000000..c7b81c1cb1 --- /dev/null +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go @@ -0,0 +1,296 @@ +// Copyright 2024 Testkube. +// +// Licensed as a Testkube Pro file under the Testkube Community +// License (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt + +package devutils + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/tls" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + errors2 "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/remotecommand" + + "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" + "github.com/kubeshop/testkube/internal/common" +) + +type BinaryStorage struct { + pod *PodObject + binary *Binary + localPort int + hashes map[string]string + hashMu sync.RWMutex +} + +func NewBinaryStorage(pod *PodObject, binary *Binary) *BinaryStorage { + return &BinaryStorage{ + pod: pod, + binary: binary, + hashes: make(map[string]string), + } +} + +func (r *BinaryStorage) Create(ctx context.Context) error { + if r.binary.Hash() == "" { + return errors2.New("binary storage server binary is not built") + } + + // Deploy Pod + err := r.pod.Create(ctx, &corev1.Pod{ + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: 
common.Ptr(int64(1)), + Volumes: []corev1.Volume{ + {Name: "server", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + {Name: "storage", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + }, + Containers: []corev1.Container{ + { + Name: "binary-storage", + Image: "busybox:1.36.1-musl", + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/sh", "-c", fmt.Sprintf("while [ ! -f /app/server-ready ]; do sleep 1; done\n/app/server")}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "server", MountPath: "/app"}, + {Name: "storage", MountPath: "/storage"}, + }, + ReadinessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt32(8080), + Scheme: corev1.URISchemeHTTP, + }, + }, + PeriodSeconds: 1, + }, + }, + }, + }, + }) + if err != nil { + return err + } + + // Wait for the container to be started + err = r.pod.WaitForContainerStarted(ctx) + if err != nil { + return err + } + + // Deploy Service + err = r.pod.CreateService(ctx, corev1.ServicePort{ + Name: "api", + Protocol: "TCP", + Port: 8080, + TargetPort: intstr.FromInt32(8080), + }) + if err != nil { + return err + } + + // TODO: Move transfer utilities to *PodObject + // Apply the binary + req := r.pod.ClientSet().CoreV1().RESTClient(). + Post(). + Resource("pods"). + Name(r.pod.Name()). + Namespace(r.pod.Namespace()). + SubResource("exec"). 
+ VersionedParams(&corev1.PodExecOptions{ + Container: "binary-storage", + Command: []string{"tar", "-xzf", "-", "-C", "/app"}, + Stdin: true, + Stdout: true, + Stderr: true, + TTY: false, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(r.pod.RESTConfig(), "POST", req.URL()) + if err != nil { + return errors2.Wrap(err, "failed to create spdy executor") + } + + os.WriteFile("/tmp/flag", []byte{1}, 0777) + flagFile, err := os.Open("/tmp/flag") + if err != nil { + return errors2.Wrap(err, "failed to open flag file") + } + defer flagFile.Close() + flagFileStat, err := flagFile.Stat() + if err != nil { + return err + } + + file, err := os.Open(r.binary.Path()) + if err != nil { + return err + } + defer file.Close() + fileStat, err := file.Stat() + if err != nil { + return err + } + + tarStream := artifacts.NewTarStream() + go func() { + defer tarStream.Close() + tarStream.Add("server", file, fileStat) + tarStream.Add("server-ready", flagFile, flagFileStat) + }() + + reader, writer := io.Pipe() + var buf []byte + var bufMu sync.Mutex + go func() { + bufMu.Lock() + defer bufMu.Unlock() + buf, _ = io.ReadAll(reader) + }() + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: tarStream, + Stdout: writer, + Stderr: writer, + Tty: false, + }) + if err != nil { + writer.Close() + bufMu.Lock() + defer bufMu.Unlock() + return fmt.Errorf("failed to stream binary: %s: %s", err.Error(), string(buf)) + } + writer.Close() + + err = r.pod.WaitForReady(ctx) + if err != nil { + return err + } + + r.localPort = GetFreePort() + err = r.pod.Forward(ctx, 8080, r.localPort, true) + if err != nil { + return err + } + + return nil +} + +func (r *BinaryStorage) WaitForReady(ctx context.Context) error { + return r.pod.WaitForReady(ctx) +} + +func (r *BinaryStorage) Is(path string, hash string) bool { + r.hashMu.RLock() + defer r.hashMu.RUnlock() + return r.hashes[path] == hash +} + +func (r *BinaryStorage) SetHash(path string, hash string) { + 
r.hashMu.Lock() + defer r.hashMu.Unlock() + r.hashes[path] = hash +} + +func (r *BinaryStorage) Upload(ctx context.Context, name string, binary *Binary) (cached bool, size int, err error) { + binary.buildMu.RLock() + defer binary.buildMu.RUnlock() + if binary.hash != "" && r.Is(name, binary.hash) { + return true, 0, nil + } + for i := 0; i < 5; i++ { + size, err = r.upload(ctx, name, binary) + if err == nil { + return + } + if ctx.Err() != nil { + return false, 0, err + } + time.Sleep(time.Duration(100*(i+1)) * time.Millisecond) + } + return false, size, err +} + +func (r *BinaryStorage) upload(ctx context.Context, name string, binary *Binary) (int, error) { + file, err := os.Open(binary.outputPath) + if err != nil { + return 0, err + } + defer file.Close() + + tr := http.DefaultTransport.(*http.Transport).Clone() + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: tr} + + if binary.hash != "" && binary.prevHash != "" && r.Is(name, binary.prevHash) { + contents, err := binary.patch() + gzipContents := bytes.NewBuffer(nil) + gz := gzip.NewWriter(gzipContents) + io.Copy(gz, bytes.NewBuffer(contents)) + gz.Flush() + gz.Close() + + gzipContentsLen := gzipContents.Len() + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, fmt.Sprintf("http://localhost:%d/%s", r.localPort, name), gzipContents) + if err != nil { + if ctx.Err() != nil { + return 0, err + } + fmt.Printf("error while sending %s patch, fallback to full stream: %s\n", name, err) + } else { + req.ContentLength = int64(gzipContentsLen) + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("X-Prev-Hash", binary.prevHash) + req.Header.Set("X-Hash", binary.hash) + res, err := client.Do(req) + if err != nil { + fmt.Printf("error while sending %s patch, fallback to full stream: %s\n", name, err) + } else if res.StatusCode != http.StatusOK { + b, _ := io.ReadAll(res.Body) + fmt.Printf("error while sending %s patch, fallback to full stream: status code: 
%s, message: %s\n", name, res.Status, string(b)) + } else { + r.SetHash(name, binary.hash) + return gzipContentsLen, nil + } + } + } + + buf := bytes.NewBuffer(nil) + gz := gzip.NewWriter(buf) + io.Copy(gz, file) + gz.Flush() + gz.Close() + bufLen := buf.Len() + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://localhost:%d/%s", r.localPort, name), buf) + if err != nil { + return bufLen, err + } + req.ContentLength = int64(bufLen) + req.Header.Set("Content-Encoding", "gzip") + + res, err := client.Do(req) + if err != nil { + return bufLen, err + } + if res.StatusCode != http.StatusOK { + b, _ := io.ReadAll(res.Body) + return bufLen, fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) + } + r.SetHash(name, binary.hash) + return bufLen, nil +} diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go deleted file mode 100644 index 95c2cafd01..0000000000 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/objectstorage.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2024 Testkube. -// -// Licensed as a Testkube Pro file under the Testkube Community -// License (the "License"); you may not use this file except in compliance with -// the License. 
You may obtain a copy of the License at -// -// https://github.com/kubeshop/testkube/blob/main/licenses/TCL.txt - -package devutils - -import ( - "bytes" - "context" - "crypto/tls" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - minio2 "github.com/minio/minio-go/v7" - - "github.com/kubeshop/testkube/cmd/testworkflow-toolkit/artifacts" - "github.com/kubeshop/testkube/internal/common" - "github.com/kubeshop/testkube/pkg/log" - "github.com/kubeshop/testkube/pkg/storage/minio" -) - -type ObjectStorage struct { - pod *PodObject - localPort int - hashes map[string]string - hashMu sync.RWMutex - cachedClient *minio2.Client - cachedClientMu sync.Mutex -} - -func NewObjectStorage(pod *PodObject) *ObjectStorage { - return &ObjectStorage{ - pod: pod, - hashes: make(map[string]string), - } -} - -func (r *ObjectStorage) Is(path string, hash string) bool { - r.hashMu.RLock() - defer r.hashMu.RUnlock() - return r.hashes[path] == hash -} - -func (r *ObjectStorage) SetHash(path string, hash string) { - r.hashMu.Lock() - defer r.hashMu.Unlock() - r.hashes[path] = hash -} - -func (r *ObjectStorage) Create(ctx context.Context) error { - err := r.pod.Create(ctx, &corev1.Pod{ - Spec: corev1.PodSpec{ - TerminationGracePeriodSeconds: common.Ptr(int64(1)), - Containers: []corev1.Container{ - { - Name: "minio", - Image: "minio/minio:RELEASE.2024-10-13T13-34-11Z", - ImagePullPolicy: corev1.PullIfNotPresent, - Args: []string{"server", "/data", "--console-address", ":9090"}, - ReadinessProbe: &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt32(9000), - }, - }, - PeriodSeconds: 1, - }, - }, - }, - }, - }) - if err != nil { - return err - } - err = r.pod.CreateService(ctx, corev1.ServicePort{ - Name: "api", - Protocol: "TCP", - Port: 9000, - TargetPort: intstr.FromInt32(9000), - }) - if err != nil { - return err - } - - err = 
r.pod.WaitForContainerStarted(ctx) - if err != nil { - return err - } - - r.localPort = GetFreePort() - err = r.pod.Forward(ctx, 9000, r.localPort, true) - if err != nil { - fmt.Println("Forward error") - return err - } - - c, err := r.Client() - if err != nil { - fmt.Println("Creating client") - return err - } - - // Handle a case when port forwarder is not ready - for i := 0; i < 10; i++ { - makeBucketCtx, ctxCancel := context.WithTimeout(ctx, 5*time.Second) - err = c.MakeBucket(makeBucketCtx, "devbox", minio2.MakeBucketOptions{}) - if err == nil { - ctxCancel() - return nil - } - if ctx.Err() != nil { - ctxCancel() - return ctx.Err() - } - ctxCancel() - } - return nil -} - -func (r *ObjectStorage) Client() (*minio2.Client, error) { - r.cachedClientMu.Lock() - defer r.cachedClientMu.Unlock() - if r.cachedClient != nil { - return r.cachedClient, nil - } - connecter := minio.NewConnecter( - fmt.Sprintf("localhost:%d", r.localPort), - "minioadmin", - "minioadmin", - "", - "", - "devbox", - log.DefaultLogger, - ) - cl, err := connecter.GetClient() - if err != nil { - return nil, err - } - r.cachedClient = cl - return cl, nil -} - -func (r *ObjectStorage) WaitForReady(ctx context.Context) error { - return r.pod.WaitForReady(ctx) -} - -func (r *ObjectStorage) Upload(ctx context.Context, path string, fsPath string, hash string) (cached bool, transferred int, err error) { - c, err := r.Client() - if err != nil { - return false, 0, err - } - if hash != "" && r.Is(path, hash) { - return true, 0, nil - } - putUrl, err := c.PresignHeader(ctx, "PUT", "devbox", path, 15*time.Minute, nil, http.Header{ - "X-Amz-Meta-Snowball-Auto-Extract": {"true"}, - "X-Amz-Meta-Minio-Snowball-Prefix": {filepath.Dir(path)}, - "Content-Type": {"application/gzip"}, - "Content-Encoding": {"gzip"}, - }) - if err != nil { - return false, 0, err - } - - file, err := os.Open(fsPath) - if err != nil { - return false, 0, err - } - defer file.Close() - stat, err := file.Stat() - if err != nil { - return 
false, 0, err - } - - buf := new(bytes.Buffer) - tarStream := artifacts.NewTarStream() - go func() { - tarStream.Add(filepath.Base(path), file, stat) - tarStream.Close() - }() - io.Copy(buf, tarStream) - bufLen := buf.Len() - - req, err := http.NewRequestWithContext(ctx, http.MethodPut, putUrl.String(), buf) - if err != nil { - return false, bufLen, err - } - req.ContentLength = int64(bufLen) - req.Header.Set("X-Amz-Meta-Snowball-Auto-Extract", "true") - req.Header.Set("X-Amz-Meta-Minio-Snowball-Prefix", filepath.Dir(path)) - req.Header.Set("Content-Type", "application/gzip") - req.Header.Set("Content-Encoding", "gzip") - - tr := http.DefaultTransport.(*http.Transport).Clone() - tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := &http.Client{Transport: tr} - res, err := client.Do(req) - if err != nil { - return false, bufLen, err - } - if res.StatusCode != http.StatusOK { - b, _ := io.ReadAll(res.Body) - return false, bufLen, fmt.Errorf("failed saving file: status code: %d / message: %s", res.StatusCode, string(b)) - } - r.SetHash(path, hash) - return false, bufLen, nil -} From 0832b7b5a178f080653eeeea52b92d23addc47f6 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 14:57:16 +0100 Subject: [PATCH 21/28] feat: make BinaryPatch more stable --- .../devbox/devutils/binarypatch.go | 601 +++++++++++++++--- 1 file changed, 504 insertions(+), 97 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index 8effd6b27d..51b26866bb 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -11,12 +11,18 @@ package devutils import ( "bytes" "encoding/binary" + "fmt" + "math" + "slices" "time" + + "github.com/dustin/go-humanize" + "k8s.io/apimachinery/pkg/util/rand" ) const ( BinaryPatchAddOp = 1 - BinaryPatchPreserveOp = 2 + BinaryPatchOriginalOp = 2 BinaryPatchDeleteOp = 3 
BinaryBatchBlockSize = 100 @@ -67,126 +73,527 @@ func (p *BinaryPatch) Load(data []byte) { } func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Duration) { - originalFileLen := len(originalFile) - currentFileLen := len(currentFile) - smallerFileLen := min(originalFileLen, currentFileLen) + size := int32(30) + step := int32(10) - iteration := 0 - currentIndex := 0 - originalIndex := 0 + ts := time.Now() - // Omit same sequence - omit := 0 - for omit < smallerFileLen && currentFile[omit] == originalFile[omit] { - omit++ + originalMarkers := make([][]int32, math.MaxUint16+1) + originalIterations := int32(len(originalFile)) - size + for i := int32(0); i < originalIterations; i++ { + marker := uint16(originalFile[i]) | uint16(originalFile[i+size])<<8 + originalMarkers[marker] = append(originalMarkers[marker], i) } - originalIndex += omit - currentIndex += omit - p.Preserve(omit) - replaced := 0 - ts := time.Now() + // Delete most popular characters to avoid problems with too many iterations + lenSum := 0 + for i := uint16(0); i < math.MaxUint16; i++ { + lenSum += len(originalMarkers[i]) + } + lenAvg := lenSum / len(originalMarkers) + deleted := 0 + for i := uint16(0); i < math.MaxUint16; i++ { + if len(originalMarkers[i]) > lenAvg { + deleted += len(originalMarkers[i]) + originalMarkers[i] = nil + } + } + // Sort all the markers + for i := uint16(0); i < math.MaxUint16; i++ { + slices.Sort(originalMarkers[i]) + } + + ciMax := int32(len(currentFile) - 1) + oiMax := int32(len(originalFile) - 1) + + lastOi := int32(0) + lastCi := int32(0) + totalSaved := 0 + iterations := 0 + + maxIndex := int32(len(currentFile)) - size loop: - for { - iteration++ - leftCurrent := currentFileLen - currentIndex - leftOriginal := originalFileLen - originalIndex - leftMin := min(leftCurrent, leftOriginal) - if leftMin <= BinaryPatchMinCommonSize { - break - } - segment := min(leftMin, BinaryBatchBlockSize*BinaryBatchBlockFactor) - BinaryPatchMinCommonSize - 
maxIterations := min(segment+replaced, leftMin) - - // Extract fast when duration passed - if maxDuration != 0 && iteration%1000 == 0 && maxDuration < time.Since(ts) { - p.Delete(originalFileLen - originalIndex) - p.Add(currentFile[currentIndex:]) - return + for ci := int32(0); ci < maxIndex; ci += step { + marker := uint16(currentFile[ci]) | uint16(currentFile[ci+size])<<8 + if len(originalMarkers[marker]) == 0 { + ci = ci - step/2 + continue } + iterations++ - // Try recovering from an endless loop - if maxIterations > BinaryBatchBlockSize*BinaryBatchBlockFactorMax { - p.Add(currentFile[currentIndex : currentIndex+maxIterations/2]) - replaced -= maxIterations / 2 - currentIndex += maxIterations / 2 - continue loop + if maxDuration != 0 && iterations%1000 == 0 && time.Since(ts) > maxDuration { + break } - // Find next match when adding to original file - biggestCommon := 0 - biggestCommonAt := 0 - biggestCommonDel := false - for i := 0; i < maxIterations; i++ { - common := 0 - for i+common < leftMin && currentFile[currentIndex+i+common] == originalFile[originalIndex+common] { - common++ + if iterations%20000 == 0 && 100*ci/maxIndex < 50 { + step = step * 4 / 3 + ci -= step + continue + } + for _, oi := range originalMarkers[marker] { + if (oi < step || ci < step || originalFile[oi-step] != currentFile[ci-step]) && (oi+step > oiMax || ci+step > ciMax || originalFile[oi+step] != currentFile[ci+step]) { + continue } - if common > biggestCommon { - biggestCommon = common - biggestCommonAt = i - biggestCommonDel = false + + // Validate exact range + l, r := int32(0), int32(0) + for ; ci-l > lastCi && oi-l > lastOi && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { } - common = 0 - for i+common < leftMin && currentFile[currentIndex+common] == originalFile[originalIndex+i+common] { - common++ + for ; oi+r < oiMax && ci+r < ciMax && originalFile[oi+r+1] == currentFile[ci+r+1]; r++ { } - if common > biggestCommon { - biggestCommon = common - biggestCommonAt = i - 
biggestCommonDel = true + // Determine if it's nice + if l+r > 14 { + totalSaved += int(r + l + 1) + p.Add(currentFile[lastCi : ci-l]) + lastCi = ci + r + 1 + ci = lastCi + 1 - step + p.Original(int(oi-l), int(r+l+1)) + continue loop } } + } + + p.Add(currentFile[lastCi:]) +} + +func (p *BinaryPatch) Read333(originalFile, currentFile []byte, maxDuration time.Duration) { + ts := time.Now() + size := int32(32_000) + + currentMarkers := make([][]int32, math.MaxUint16+1) + currentIterations := int32(len(currentFile)) - size + for i := int32(0); i < currentIterations; i++ { + marker := uint16(currentFile[i]) | uint16(currentFile[i+size])<<8 + currentMarkers[marker] = append(currentMarkers[marker], i) + } + + // Sort all the markers + for i := uint16(0); i < math.MaxUint16; i++ { + slices.Sort(currentMarkers[i]) + } + + fmt.Println("indexed in", time.Since(ts)) + + ciMax := int32(len(currentFile) - 1) + oiMax := int32(len(originalFile) - 1) + + samples := make([]int32, 1000) + for i := 0; i < 1000; i++ { + samples[i] = int32(rand.Intn(len(originalFile) - int(size))) + } + slices.Sort(samples) + + lastOi := int32(0) + lastCi := int32(0) + totalSaved := 0 + +loop: + for _, oi := range samples { + if oi <= lastOi { + continue + } + marker := uint16(originalFile[oi]) | uint16(originalFile[oi+size])<<8 + maxOL, maxOR := int32(0), int32(0) + maxCL, maxCR := int32(0), int32(0) + for _, ci := range currentMarkers[marker] { + if ci <= lastCi { + continue + } + // Check if it has not been validated lately + if maxCL <= ci && maxCR >= ci { + continue + } - if biggestCommon >= BinaryPatchMinCommonSize { - if biggestCommonDel { - p.Delete(biggestCommonAt) - p.Preserve(biggestCommon) - replaced += biggestCommonAt - originalIndex += biggestCommonAt + biggestCommon - currentIndex += biggestCommon - } else { - p.Add(currentFile[currentIndex : currentIndex+biggestCommonAt]) - p.Preserve(biggestCommon) - replaced -= biggestCommonAt - currentIndex += biggestCommonAt + biggestCommon - 
originalIndex += biggestCommon + // Validate exact range + l, r := int32(0), int32(0) + // Determine left side + for ; ci-l != 0 && oi-l != 0 && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { + } + // Determine right side + for ; oi+r != oiMax && ci+r != ciMax && originalFile[oi+r+1] == currentFile[ci+r+1]; r++ { + } + if maxOR-maxOL < r+l { + maxOL = oi - l + maxOR = oi + r + maxCL = ci - l + maxCR = ci + r } continue loop } - // Treat some part as deleted to proceed - p.Delete(BinaryPatchMinCommonSize) - replaced += BinaryPatchMinCommonSize - originalIndex += BinaryPatchMinCommonSize + if maxOR-maxOL > size { + lastOi = maxOR + lastCi = maxCR + totalSaved += int(maxOR - maxOL + 1) + fmt.Printf("Detected %s common (org: %d-%d, new: %d-%d)\n", humanize.Bytes(uint64(maxOR-maxOL)), maxOL, maxOR, maxCL, maxCR) + } } + fmt.Printf("Saved %s out of %s binary\n", humanize.Bytes(uint64(totalSaved)), humanize.Bytes(uint64(len(currentFile)))) +} + +func (p *BinaryPatch) oooRead(originalFile, currentFile []byte, maxDuration time.Duration) { + //size := int32(min(len(originalFile)/10, len(currentFile)/10)) + size := int32(32_000) + //pivotSize := size / 8 + ts := time.Now() + + //// Index current file by start and end + //ts := time.Now() + //currentZeroBytes := []int32{} + ////currentIndex := make(map[byte]map[byte]int32, 255) + ////for i := 0; i <= 255; i++ { + //// currentIndex[byte(i)] = make(map[byte]int32) + ////} + //for i := 0; i < len(currentFile)-size; i++ { + // if currentFile[i] == 0 { + // currentZeroBytes = append(currentZeroBytes, int32(i)) + // } + // //currentIndex[currentFile[i]][currentFile[i+size]] = int32(i) + // //u := binary.BigEndian.Uint16([]byte{currentFile[i], currentFile[i+size]}) + // //currentIndex[u] = append(currentIndex[u], int32(i)) + //} - if currentIndex != currentFileLen { - p.Add(currentFile[currentIndex:]) - } else if originalIndex != originalFileLen { - p.Preserve(originalFileLen - originalIndex) + currentMarkers := 
make(map[uint16][]int32, math.MaxUint16+1) + for i := uint16(0); i < math.MaxUint16; i++ { + currentMarkers[i] = make([]int32, 0) } + currentIterations := int32(len(currentFile)) - size + for i := int32(0); i < currentIterations; i++ { + u := uint16(currentFile[i]) | uint16(currentFile[i+size])<<8 + currentMarkers[u] = append(currentMarkers[u], i) + } + + ciMax := int32(len(currentFile) - 1) + oiMax := int32(len(originalFile) - 1) + + //originalMarkers := make(map[uint16][]int32, math.MaxUint16+1) + //originalIterations := int32(len(originalFile)) - size + //for i := int32(0); i < originalIterations; i++ { + // if originalFile[i] != 0 { + // continue + // } + // u := uint16(originalFile[i+pivotSize]) | uint16(originalFile[i+size])<<8 + // if _, ok := originalMarkers[u]; !ok { + // originalMarkers[u] = []int32{i} + // } else { + // originalMarkers[u] = append(originalMarkers[u], i) + // } + //} + fmt.Println("Indexed files in", time.Since(ts)) + + type set struct { + start int32 + end int32 + } + commonCurrentSets := make([]set, 0) + commonOriginalSets := make([]set, 0) + +loop1: + for i := 0; i < 10000; i++ { + t := time.Now() + oi := int32(rand.Intn(len(originalFile) - int(size))) + for _, v := range commonOriginalSets { + if v.start <= oi && v.end >= oi { + fmt.Printf("exists. 
took %s\n", time.Since(t)) + continue loop1 + } + } + u := uint16(originalFile[oi]) | uint16(originalFile[oi+size])<<8 + + maxOL, maxOR := int32(0), int32(0) + maxCL, maxCR := int32(0), int32(0) + loop: + for _, ci := range currentMarkers[u] { + // Check if it has not been validated lately + if maxCL <= ci && maxCR >= ci { + continue + } + // Check if it's not already in the common current set + for _, v := range commonCurrentSets { + if v.start <= ci && v.end >= ci { + continue loop + } + } + + // Validate exact range + l, r := int32(0), int32(0) + // Determine left side + for ; ci-l != 0 && oi-l != 0 && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { + } + // Determine right side + for ; oi+r != oiMax && ci+r != ciMax && originalFile[oi+r+1] != currentFile[oi+r+1]; r++ { + } + if maxOR-maxOL < r+l { + maxOL = oi - l + maxOR = oi + r + maxCL = ci - l + maxCR = ci + r + } + } + if maxOR-maxOL < 16000 { + fmt.Printf("too small. took %s\n", time.Since(t)) + continue + } + commonCurrentSets = append(commonCurrentSets, set{start: maxCL, end: maxCR}) + commonOriginalSets = append(commonCurrentSets, set{start: maxOL, end: maxOR}) + fmt.Printf("Detected %s common (org: %d-%d, new: %d-%d)\n", humanize.Bytes(uint64(maxOR-maxOL)), maxOL, maxOR, maxCL, maxCR) + fmt.Printf("took %s\n", time.Since(t)) + } + + //for k1, v1 := range currentMarkers { + // for _, i := range originalMarkers[k1] { + // loop: + // for _, j := range v1 { + // if originalFile[i] != currentFile[j] || + // originalFile[i+size] != currentFile[j+size] || + // originalFile[i+pivotSize] != currentFile[j+pivotSize] || + // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || + // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || + // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || + // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || + // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || + // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || 
+ // originalFile[i+size] != currentFile[j+size] { + // continue + // } + // for k := int32(1); k < size; k++ { + // if originalFile[i+k] != currentFile[j+k] { + // continue loop + // } + // } + // l, r := int32(0), size + // for ; i-l >= 0 && j-l >= 0; l++ { + // if l == 0 || originalFile[i-l-1] != currentFile[j-l-1] { + // break + // } + // } + // for ; i+r < originalIterations && j+r < currentIterations; r++ { + // if originalFile[i+r+1] != currentFile[j+r+1] { + // break + // } + // } + // fmt.Printf("found common %s\n", humanize.Bytes(uint64(l+size+r))) + // } + // } + //} + + //iterations = int32(len(originalFile)) - size + //for i := int32(0); i < iterations; i += 1 { + // if originalFile[i] != 0 { + // continue + // } + // u := uint16(255*originalFile[i] + originalFile[i+size]) + // fmt.Println("found potential", len(currentMarkers[u])) + //loop: + // for _, j := range currentMarkers[u] { + // if originalFile[i] != currentFile[j] || + // originalFile[i+size] != currentFile[j+size] || + // originalFile[i+pivotSize] != currentFile[j+pivotSize] || + // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || + // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || + // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || + // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || + // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || + // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || + // originalFile[i+size] != currentFile[j+size] { + // continue + // } + // fmt.Printf("potential\n") + // + // for k := int32(1); k < size; k++ { + // if originalFile[i+k] != currentFile[j+k] { + // fmt.Printf(" nope at %d\n", k) + // continue loop + // } + // } + // fmt.Printf("%x\n", currentFile[j+pivotSize-1:j+pivotSize+1]) + // os.Exit(0) + // } + //} + //originalMarkers := make(map[byte][]int32) + //for i := 0; i <= 255; i++ { + // originalMarkers[byte(i)] = make([]int32, 0) + //} + //for i := 0; i < 
len(originalFile)-size; i++ { + // if originalFile[i] == 0 { + // originalMarkers[originalFile[i+size]] = append(originalMarkers[originalFile[i+size]], int32(i)) + // } + //} + //iterations = int32(len(originalFile)) - size + //for i := int32(0); i < iterations; i++ { + // if originalFile[i] == 0 { + // for _, j := range currentMarkers[originalFile[i+size]] { + // fmt.Println("checking zero") + // if originalFile[i] != currentFile[j] || + // originalFile[i+size] != currentFile[j+size] || + // originalFile[i+pivotSize] != currentFile[j+pivotSize] || + // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || + // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || + // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || + // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || + // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || + // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || + // originalFile[i+size] != currentFile[j+size] { + // continue + // } + // fmt.Println("found match") + // } + // } + //} + + //for percentage := 20; percentage >= 10; percentage -= 10 { + // fmt.Println("Checking percentage", percentage) + // pivotSize := size / 8 + // for i := 0; i < len(originalFile)-size; i++ { + // if i%100 == 0 { + // progress := 100 * i / (len(currentFile) - size) + // fmt.Println(progress) + // } + // loop: + // for j := 0; j < len(currentFile)-size; j++ { + // if originalFile[i] != currentFile[j] || + // originalFile[i+size] != currentFile[j+size] || + // originalFile[i+pivotSize] != currentFile[j+pivotSize] || + // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || + // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || + // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || + // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || + // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || + // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || + // 
originalFile[i+size] != currentFile[j+size] { + // continue + // } + // fmt.Printf("detected potential %s block\n", humanize.Bytes(uint64(size))) + // for k := 1; k < size; k++ { + // if originalFile[i+k] != currentFile[i+k] { + // fmt.Printf(" nope at %d\n", k) + // continue loop + // } + // } + // fmt.Printf("detected %s block\n", humanize.Bytes(uint64(size))) + // os.Exit(0) + // } + // } + //} } +//func (p *BinaryPatch) OldRead(originalFile, currentFile []byte, maxDuration time.Duration) { +// originalFileLen := len(originalFile) +// currentFileLen := len(currentFile) +// smallerFileLen := min(originalFileLen, currentFileLen) +// +// iteration := 0 +// currentIndex := 0 +// originalIndex := 0 +// +// // Omit same sequence +// omit := 0 +// for omit < smallerFileLen && currentFile[omit] == originalFile[omit] { +// omit++ +// } +// originalIndex += omit +// currentIndex += omit +// p.Preserve(omit) +// +// replaced := 0 +// ts := time.Now() +// +//loop: +// for { +// iteration++ +// leftCurrent := currentFileLen - currentIndex +// leftOriginal := originalFileLen - originalIndex +// leftMin := min(leftCurrent, leftOriginal) +// if leftMin <= BinaryPatchMinCommonSize { +// break +// } +// segment := min(leftMin, BinaryBatchBlockSize*BinaryBatchBlockFactor) - BinaryPatchMinCommonSize +// maxIterations := min(segment+replaced, leftMin) +// +// // Extract fast when duration passed +// if maxDuration != 0 && iteration%1000 == 0 && maxDuration < time.Since(ts) { +// p.Delete(originalFileLen - originalIndex) +// p.Add(currentFile[currentIndex:]) +// return +// } +// +// // Try recovering from an endless loop +// if maxIterations > BinaryBatchBlockSize*BinaryBatchBlockFactorMax { +// p.Add(currentFile[currentIndex : currentIndex+maxIterations/2]) +// replaced -= maxIterations / 2 +// currentIndex += maxIterations / 2 +// continue loop +// } +// +// // Find next match when adding to original file +// biggestCommon := 0 +// biggestCommonAt := 0 +// biggestCommonDel := false 
+// for i := 0; i < maxIterations; i++ { +// common := 0 +// for i+common < leftMin && currentFile[currentIndex+i+common] == originalFile[originalIndex+common] { +// common++ +// } +// if common > biggestCommon { +// biggestCommon = common +// biggestCommonAt = i +// biggestCommonDel = false +// } +// common = 0 +// for i+common < leftMin && currentFile[currentIndex+common] == originalFile[originalIndex+i+common] { +// common++ +// } +// if common > biggestCommon { +// biggestCommon = common +// biggestCommonAt = i +// biggestCommonDel = true +// } +// } +// +// if biggestCommon >= BinaryPatchMinCommonSize { +// if biggestCommonDel { +// p.Delete(biggestCommonAt) +// p.Preserve(biggestCommon) +// replaced += biggestCommonAt +// originalIndex += biggestCommonAt + biggestCommon +// currentIndex += biggestCommon +// } else { +// p.Add(currentFile[currentIndex : currentIndex+biggestCommonAt]) +// p.Preserve(biggestCommon) +// replaced -= biggestCommonAt +// currentIndex += biggestCommonAt + biggestCommon +// originalIndex += biggestCommon +// } +// continue loop +// } +// +// // Treat some part as deleted to proceed +// p.Delete(BinaryPatchMinCommonSize) +// replaced += BinaryPatchMinCommonSize +// originalIndex += BinaryPatchMinCommonSize +// } +// +// if currentIndex != currentFileLen { +// p.Add(currentFile[currentIndex:]) +// } else if originalIndex != originalFileLen { +// p.Preserve(originalFileLen - originalIndex) +// } +//} + func (p *BinaryPatch) Len() int { return p.buf.Len() } -func (p *BinaryPatch) Preserve(bytesCount int) { +func (p *BinaryPatch) Original(index, bytesCount int) { if bytesCount == 0 { return } - if p.lastOp == BinaryPatchPreserveOp { - p.lastCount += bytesCount - b := p.buf.Bytes() - binary.LittleEndian.PutUint32(b[len(b)-4:], uint32(p.lastCount)) - return - } - p.lastOp = BinaryPatchPreserveOp - p.lastCount = bytesCount - p.buf.WriteByte(BinaryPatchPreserveOp) + p.lastOp = BinaryPatchOriginalOp + p.buf.WriteByte(BinaryPatchOriginalOp) num 
:= make([]byte, 4) + binary.LittleEndian.PutUint32(num, uint32(index)) + p.buf.Write(num) binary.LittleEndian.PutUint32(num, uint32(bytesCount)) p.buf.Write(num) } @@ -235,15 +642,15 @@ func (p *BinaryPatch) Apply(original []byte) []byte { patch := p.buf.Bytes() for i := 0; i < len(patch); { switch patch[i] { - case BinaryPatchPreserveOp: - count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) - result = append(result, original[:count]...) - original = original[count:] - i += 5 - case BinaryPatchDeleteOp: - count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) - original = original[count:] - i += 5 + case BinaryPatchOriginalOp: + index := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + count := binary.LittleEndian.Uint32(patch[i+5 : i+9]) + result = append(result, original[index:index+count]...) + i += 9 + //case BinaryPatchDeleteOp: + // count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + // original = original[count:] + // i += 5 case BinaryPatchAddOp: count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) result = append(result, patch[i+5:i+5+int(count)]...) 
From e489e79601a9edb69bff53a816be7ef57d03a649 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 16:56:21 +0100 Subject: [PATCH 22/28] fixup delete unused code --- .../devbox/devutils/binarypatch.go | 430 ------------------ 1 file changed, 430 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index 51b26866bb..bc61cae191 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -11,13 +11,9 @@ package devutils import ( "bytes" "encoding/binary" - "fmt" "math" "slices" "time" - - "github.com/dustin/go-humanize" - "k8s.io/apimachinery/pkg/util/rand" ) const ( @@ -91,10 +87,8 @@ func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Du lenSum += len(originalMarkers[i]) } lenAvg := lenSum / len(originalMarkers) - deleted := 0 for i := uint16(0); i < math.MaxUint16; i++ { if len(originalMarkers[i]) > lenAvg { - deleted += len(originalMarkers[i]) originalMarkers[i] = nil } } @@ -157,430 +151,6 @@ loop: p.Add(currentFile[lastCi:]) } -func (p *BinaryPatch) Read333(originalFile, currentFile []byte, maxDuration time.Duration) { - ts := time.Now() - size := int32(32_000) - - currentMarkers := make([][]int32, math.MaxUint16+1) - currentIterations := int32(len(currentFile)) - size - for i := int32(0); i < currentIterations; i++ { - marker := uint16(currentFile[i]) | uint16(currentFile[i+size])<<8 - currentMarkers[marker] = append(currentMarkers[marker], i) - } - - // Sort all the markers - for i := uint16(0); i < math.MaxUint16; i++ { - slices.Sort(currentMarkers[i]) - } - - fmt.Println("indexed in", time.Since(ts)) - - ciMax := int32(len(currentFile) - 1) - oiMax := int32(len(originalFile) - 1) - - samples := make([]int32, 1000) - for i := 0; i < 1000; i++ { - samples[i] = int32(rand.Intn(len(originalFile) - int(size))) - } - slices.Sort(samples) - - lastOi := int32(0) - 
lastCi := int32(0) - totalSaved := 0 - -loop: - for _, oi := range samples { - if oi <= lastOi { - continue - } - marker := uint16(originalFile[oi]) | uint16(originalFile[oi+size])<<8 - maxOL, maxOR := int32(0), int32(0) - maxCL, maxCR := int32(0), int32(0) - for _, ci := range currentMarkers[marker] { - if ci <= lastCi { - continue - } - // Check if it has not been validated lately - if maxCL <= ci && maxCR >= ci { - continue - } - - // Validate exact range - l, r := int32(0), int32(0) - // Determine left side - for ; ci-l != 0 && oi-l != 0 && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { - } - // Determine right side - for ; oi+r != oiMax && ci+r != ciMax && originalFile[oi+r+1] == currentFile[ci+r+1]; r++ { - } - if maxOR-maxOL < r+l { - maxOL = oi - l - maxOR = oi + r - maxCL = ci - l - maxCR = ci + r - } - continue loop - } - - if maxOR-maxOL > size { - lastOi = maxOR - lastCi = maxCR - totalSaved += int(maxOR - maxOL + 1) - fmt.Printf("Detected %s common (org: %d-%d, new: %d-%d)\n", humanize.Bytes(uint64(maxOR-maxOL)), maxOL, maxOR, maxCL, maxCR) - } - } - fmt.Printf("Saved %s out of %s binary\n", humanize.Bytes(uint64(totalSaved)), humanize.Bytes(uint64(len(currentFile)))) -} - -func (p *BinaryPatch) oooRead(originalFile, currentFile []byte, maxDuration time.Duration) { - //size := int32(min(len(originalFile)/10, len(currentFile)/10)) - size := int32(32_000) - //pivotSize := size / 8 - ts := time.Now() - - //// Index current file by start and end - //ts := time.Now() - //currentZeroBytes := []int32{} - ////currentIndex := make(map[byte]map[byte]int32, 255) - ////for i := 0; i <= 255; i++ { - //// currentIndex[byte(i)] = make(map[byte]int32) - ////} - //for i := 0; i < len(currentFile)-size; i++ { - // if currentFile[i] == 0 { - // currentZeroBytes = append(currentZeroBytes, int32(i)) - // } - // //currentIndex[currentFile[i]][currentFile[i+size]] = int32(i) - // //u := binary.BigEndian.Uint16([]byte{currentFile[i], currentFile[i+size]}) - // 
//currentIndex[u] = append(currentIndex[u], int32(i)) - //} - - currentMarkers := make(map[uint16][]int32, math.MaxUint16+1) - for i := uint16(0); i < math.MaxUint16; i++ { - currentMarkers[i] = make([]int32, 0) - } - currentIterations := int32(len(currentFile)) - size - for i := int32(0); i < currentIterations; i++ { - u := uint16(currentFile[i]) | uint16(currentFile[i+size])<<8 - currentMarkers[u] = append(currentMarkers[u], i) - } - - ciMax := int32(len(currentFile) - 1) - oiMax := int32(len(originalFile) - 1) - - //originalMarkers := make(map[uint16][]int32, math.MaxUint16+1) - //originalIterations := int32(len(originalFile)) - size - //for i := int32(0); i < originalIterations; i++ { - // if originalFile[i] != 0 { - // continue - // } - // u := uint16(originalFile[i+pivotSize]) | uint16(originalFile[i+size])<<8 - // if _, ok := originalMarkers[u]; !ok { - // originalMarkers[u] = []int32{i} - // } else { - // originalMarkers[u] = append(originalMarkers[u], i) - // } - //} - fmt.Println("Indexed files in", time.Since(ts)) - - type set struct { - start int32 - end int32 - } - commonCurrentSets := make([]set, 0) - commonOriginalSets := make([]set, 0) - -loop1: - for i := 0; i < 10000; i++ { - t := time.Now() - oi := int32(rand.Intn(len(originalFile) - int(size))) - for _, v := range commonOriginalSets { - if v.start <= oi && v.end >= oi { - fmt.Printf("exists. 
took %s\n", time.Since(t)) - continue loop1 - } - } - u := uint16(originalFile[oi]) | uint16(originalFile[oi+size])<<8 - - maxOL, maxOR := int32(0), int32(0) - maxCL, maxCR := int32(0), int32(0) - loop: - for _, ci := range currentMarkers[u] { - // Check if it has not been validated lately - if maxCL <= ci && maxCR >= ci { - continue - } - // Check if it's not already in the common current set - for _, v := range commonCurrentSets { - if v.start <= ci && v.end >= ci { - continue loop - } - } - - // Validate exact range - l, r := int32(0), int32(0) - // Determine left side - for ; ci-l != 0 && oi-l != 0 && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { - } - // Determine right side - for ; oi+r != oiMax && ci+r != ciMax && originalFile[oi+r+1] != currentFile[oi+r+1]; r++ { - } - if maxOR-maxOL < r+l { - maxOL = oi - l - maxOR = oi + r - maxCL = ci - l - maxCR = ci + r - } - } - if maxOR-maxOL < 16000 { - fmt.Printf("too small. took %s\n", time.Since(t)) - continue - } - commonCurrentSets = append(commonCurrentSets, set{start: maxCL, end: maxCR}) - commonOriginalSets = append(commonCurrentSets, set{start: maxOL, end: maxOR}) - fmt.Printf("Detected %s common (org: %d-%d, new: %d-%d)\n", humanize.Bytes(uint64(maxOR-maxOL)), maxOL, maxOR, maxCL, maxCR) - fmt.Printf("took %s\n", time.Since(t)) - } - - //for k1, v1 := range currentMarkers { - // for _, i := range originalMarkers[k1] { - // loop: - // for _, j := range v1 { - // if originalFile[i] != currentFile[j] || - // originalFile[i+size] != currentFile[j+size] || - // originalFile[i+pivotSize] != currentFile[j+pivotSize] || - // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || - // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || - // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || - // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || - // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || - // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || 
- // originalFile[i+size] != currentFile[j+size] { - // continue - // } - // for k := int32(1); k < size; k++ { - // if originalFile[i+k] != currentFile[j+k] { - // continue loop - // } - // } - // l, r := int32(0), size - // for ; i-l >= 0 && j-l >= 0; l++ { - // if l == 0 || originalFile[i-l-1] != currentFile[j-l-1] { - // break - // } - // } - // for ; i+r < originalIterations && j+r < currentIterations; r++ { - // if originalFile[i+r+1] != currentFile[j+r+1] { - // break - // } - // } - // fmt.Printf("found common %s\n", humanize.Bytes(uint64(l+size+r))) - // } - // } - //} - - //iterations = int32(len(originalFile)) - size - //for i := int32(0); i < iterations; i += 1 { - // if originalFile[i] != 0 { - // continue - // } - // u := uint16(255*originalFile[i] + originalFile[i+size]) - // fmt.Println("found potential", len(currentMarkers[u])) - //loop: - // for _, j := range currentMarkers[u] { - // if originalFile[i] != currentFile[j] || - // originalFile[i+size] != currentFile[j+size] || - // originalFile[i+pivotSize] != currentFile[j+pivotSize] || - // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || - // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || - // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || - // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || - // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || - // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || - // originalFile[i+size] != currentFile[j+size] { - // continue - // } - // fmt.Printf("potential\n") - // - // for k := int32(1); k < size; k++ { - // if originalFile[i+k] != currentFile[j+k] { - // fmt.Printf(" nope at %d\n", k) - // continue loop - // } - // } - // fmt.Printf("%x\n", currentFile[j+pivotSize-1:j+pivotSize+1]) - // os.Exit(0) - // } - //} - //originalMarkers := make(map[byte][]int32) - //for i := 0; i <= 255; i++ { - // originalMarkers[byte(i)] = make([]int32, 0) - //} - //for i := 0; i < 
len(originalFile)-size; i++ { - // if originalFile[i] == 0 { - // originalMarkers[originalFile[i+size]] = append(originalMarkers[originalFile[i+size]], int32(i)) - // } - //} - //iterations = int32(len(originalFile)) - size - //for i := int32(0); i < iterations; i++ { - // if originalFile[i] == 0 { - // for _, j := range currentMarkers[originalFile[i+size]] { - // fmt.Println("checking zero") - // if originalFile[i] != currentFile[j] || - // originalFile[i+size] != currentFile[j+size] || - // originalFile[i+pivotSize] != currentFile[j+pivotSize] || - // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || - // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || - // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || - // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || - // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || - // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || - // originalFile[i+size] != currentFile[j+size] { - // continue - // } - // fmt.Println("found match") - // } - // } - //} - - //for percentage := 20; percentage >= 10; percentage -= 10 { - // fmt.Println("Checking percentage", percentage) - // pivotSize := size / 8 - // for i := 0; i < len(originalFile)-size; i++ { - // if i%100 == 0 { - // progress := 100 * i / (len(currentFile) - size) - // fmt.Println(progress) - // } - // loop: - // for j := 0; j < len(currentFile)-size; j++ { - // if originalFile[i] != currentFile[j] || - // originalFile[i+size] != currentFile[j+size] || - // originalFile[i+pivotSize] != currentFile[j+pivotSize] || - // originalFile[i+pivotSize*2] != currentFile[j+pivotSize*2] || - // originalFile[i+pivotSize*3] != currentFile[j+pivotSize*3] || - // originalFile[i+pivotSize*4] != currentFile[j+pivotSize*4] || - // originalFile[i+pivotSize*5] != currentFile[j+pivotSize*5] || - // originalFile[i+pivotSize*6] != currentFile[j+pivotSize*6] || - // originalFile[i+pivotSize*7] != currentFile[j+pivotSize*7] || - // 
originalFile[i+size] != currentFile[j+size] { - // continue - // } - // fmt.Printf("detected potential %s block\n", humanize.Bytes(uint64(size))) - // for k := 1; k < size; k++ { - // if originalFile[i+k] != currentFile[i+k] { - // fmt.Printf(" nope at %d\n", k) - // continue loop - // } - // } - // fmt.Printf("detected %s block\n", humanize.Bytes(uint64(size))) - // os.Exit(0) - // } - // } - //} -} - -//func (p *BinaryPatch) OldRead(originalFile, currentFile []byte, maxDuration time.Duration) { -// originalFileLen := len(originalFile) -// currentFileLen := len(currentFile) -// smallerFileLen := min(originalFileLen, currentFileLen) -// -// iteration := 0 -// currentIndex := 0 -// originalIndex := 0 -// -// // Omit same sequence -// omit := 0 -// for omit < smallerFileLen && currentFile[omit] == originalFile[omit] { -// omit++ -// } -// originalIndex += omit -// currentIndex += omit -// p.Preserve(omit) -// -// replaced := 0 -// ts := time.Now() -// -//loop: -// for { -// iteration++ -// leftCurrent := currentFileLen - currentIndex -// leftOriginal := originalFileLen - originalIndex -// leftMin := min(leftCurrent, leftOriginal) -// if leftMin <= BinaryPatchMinCommonSize { -// break -// } -// segment := min(leftMin, BinaryBatchBlockSize*BinaryBatchBlockFactor) - BinaryPatchMinCommonSize -// maxIterations := min(segment+replaced, leftMin) -// -// // Extract fast when duration passed -// if maxDuration != 0 && iteration%1000 == 0 && maxDuration < time.Since(ts) { -// p.Delete(originalFileLen - originalIndex) -// p.Add(currentFile[currentIndex:]) -// return -// } -// -// // Try recovering from an endless loop -// if maxIterations > BinaryBatchBlockSize*BinaryBatchBlockFactorMax { -// p.Add(currentFile[currentIndex : currentIndex+maxIterations/2]) -// replaced -= maxIterations / 2 -// currentIndex += maxIterations / 2 -// continue loop -// } -// -// // Find next match when adding to original file -// biggestCommon := 0 -// biggestCommonAt := 0 -// biggestCommonDel := 
false -// for i := 0; i < maxIterations; i++ { -// common := 0 -// for i+common < leftMin && currentFile[currentIndex+i+common] == originalFile[originalIndex+common] { -// common++ -// } -// if common > biggestCommon { -// biggestCommon = common -// biggestCommonAt = i -// biggestCommonDel = false -// } -// common = 0 -// for i+common < leftMin && currentFile[currentIndex+common] == originalFile[originalIndex+i+common] { -// common++ -// } -// if common > biggestCommon { -// biggestCommon = common -// biggestCommonAt = i -// biggestCommonDel = true -// } -// } -// -// if biggestCommon >= BinaryPatchMinCommonSize { -// if biggestCommonDel { -// p.Delete(biggestCommonAt) -// p.Preserve(biggestCommon) -// replaced += biggestCommonAt -// originalIndex += biggestCommonAt + biggestCommon -// currentIndex += biggestCommon -// } else { -// p.Add(currentFile[currentIndex : currentIndex+biggestCommonAt]) -// p.Preserve(biggestCommon) -// replaced -= biggestCommonAt -// currentIndex += biggestCommonAt + biggestCommon -// originalIndex += biggestCommon -// } -// continue loop -// } -// -// // Treat some part as deleted to proceed -// p.Delete(BinaryPatchMinCommonSize) -// replaced += BinaryPatchMinCommonSize -// originalIndex += BinaryPatchMinCommonSize -// } -// -// if currentIndex != currentFileLen { -// p.Add(currentFile[currentIndex:]) -// } else if originalIndex != originalFileLen { -// p.Preserve(originalFileLen - originalIndex) -// } -//} - func (p *BinaryPatch) Len() int { return p.buf.Len() } From a86ac21a3aa3bf6c32de901197afbf3e2a5ed4a8 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 18:27:59 +0100 Subject: [PATCH 23/28] fix: make binary patch more stable --- .../devbox/devutils/binarypatch.go | 93 +++++++++++++------ 1 file changed, 66 insertions(+), 27 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index bc61cae191..ac5bfd7b6d 100644 --- 
a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -20,6 +20,7 @@ const ( BinaryPatchAddOp = 1 BinaryPatchOriginalOp = 2 BinaryPatchDeleteOp = 3 + BinaryPatchRepeatOp = 4 BinaryBatchBlockSize = 100 BinaryBatchBlockFactor = 8 @@ -69,15 +70,20 @@ func (p *BinaryPatch) Load(data []byte) { } func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Duration) { - size := int32(30) - step := int32(10) + skew := int32(60) + minReuse := int32(20) + step := skew / 2 ts := time.Now() originalMarkers := make([][]int32, math.MaxUint16+1) - originalIterations := int32(len(originalFile)) - size - for i := int32(0); i < originalIterations; i++ { - marker := uint16(originalFile[i]) | uint16(originalFile[i+size])<<8 + originalIterations := int32(len(originalFile)) - skew + for i := skew; i < originalIterations; i++ { + if originalFile[i] == 0 { + continue + } + // Approximate the marker + marker := uint16((int(originalFile[i-(skew/4)])+int(originalFile[i-(skew/2)]))/2) | uint16((int(originalFile[i+(skew/4)])+int(originalFile[i+(skew/2)]))/2)<<8 originalMarkers[marker] = append(originalMarkers[marker], i) } @@ -106,30 +112,36 @@ func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Du totalSaved := 0 iterations := 0 - maxIndex := int32(len(currentFile)) - size + maxIndex := int32(len(currentFile)) - skew loop: - for ci := int32(0); ci < maxIndex; ci += step { - marker := uint16(currentFile[ci]) | uint16(currentFile[ci+size])<<8 - if len(originalMarkers[marker]) == 0 { - ci = ci - step/2 + for ci := skew; ci < maxIndex; { + iterations++ + if currentFile[ci] == 0 { + ci += step continue } - iterations++ + marker := uint16((int(currentFile[ci-(skew/4)])+int(currentFile[ci-(skew/2)]))/2) | uint16((int(currentFile[ci+(skew/4)])+int(currentFile[ci+(skew/2)]))/2)<<8 if maxDuration != 0 && iterations%1000 == 0 && time.Since(ts) > maxDuration { break } - if iterations%20000 == 
0 && 100*ci/maxIndex < 50 { - step = step * 4 / 3 - ci -= step + if iterations%50000 == 0 { + step = step * 7 / 6 continue } + + nextCL := 0 + nextCR := 0 + nextOL := 0 for _, oi := range originalMarkers[marker] { - if (oi < step || ci < step || originalFile[oi-step] != currentFile[ci-step]) && (oi+step > oiMax || ci+step > ciMax || originalFile[oi+step] != currentFile[ci+step]) { + if currentFile[ci] != originalFile[oi] || + currentFile[ci+1] != originalFile[oi+1] || + currentFile[ci-1] != originalFile[oi-1] || + currentFile[ci-skew/2] != originalFile[oi-skew/2] || + currentFile[ci+skew/2] != originalFile[oi+skew/2] { continue } - // Validate exact range l, r := int32(0), int32(0) for ; ci-l > lastCi && oi-l > lastOi && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { @@ -137,15 +149,26 @@ loop: for ; oi+r < oiMax && ci+r < ciMax && originalFile[oi+r+1] == currentFile[ci+r+1]; r++ { } // Determine if it's nice - if l+r > 14 { - totalSaved += int(r + l + 1) - p.Add(currentFile[lastCi : ci-l]) - lastCi = ci + r + 1 - ci = lastCi + 1 - step - p.Original(int(oi-l), int(r+l+1)) - continue loop + if l+r+1 >= minReuse && nextCR-nextCL < int(r+l) { + nextCL = int(ci - l) + nextCR = int(ci + r) + nextOL = int(oi - l) + } + if l+r > skew { + break } } + + if nextCL != 0 || nextCR != 0 { + totalSaved += (nextCR - nextCL) + 1 + p.Add(currentFile[lastCi:nextCL]) + lastCi = int32(nextCR + 1) + p.Original(nextOL, (nextCR-nextCL)+1) + ci = lastCi + 1 + continue loop + } + + ci += step } p.Add(currentFile[lastCi:]) @@ -207,6 +230,18 @@ func (p *BinaryPatch) Add(bytesArr []byte) { p.buf.Write(bytesArr) } +func (p *BinaryPatch) Repeat(count int, b byte) { + if count == 0 { + return + } + p.lastOp = BinaryPatchRepeatOp + p.buf.WriteByte(BinaryPatchRepeatOp) + num := make([]byte, 4) + binary.LittleEndian.PutUint32(num, uint32(count)) + p.buf.Write(num) + p.buf.WriteByte(b) +} + func (p *BinaryPatch) Apply(original []byte) []byte { result := make([]byte, 0) patch := p.buf.Bytes() @@ 
-217,10 +252,14 @@ func (p *BinaryPatch) Apply(original []byte) []byte { count := binary.LittleEndian.Uint32(patch[i+5 : i+9]) result = append(result, original[index:index+count]...) i += 9 - //case BinaryPatchDeleteOp: - // count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) - // original = original[count:] - // i += 5 + case BinaryPatchRepeatOp: + count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) + buf := make([]byte, count) + for j := 0; j < int(count); j++ { + buf[j] = patch[i+5] + } + result = append(result, buf...) + i += 6 case BinaryPatchAddOp: count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) result = append(result, patch[i+5:i+5+int(count)]...) From cf4e37817b26ce92c07f2ca7058f44f379f78ca6 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 18:31:24 +0100 Subject: [PATCH 24/28] chore: adjust a bit binary patch constants --- cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index ac5bfd7b6d..5a80f7378a 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -70,8 +70,8 @@ func (p *BinaryPatch) Load(data []byte) { } func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Duration) { - skew := int32(60) - minReuse := int32(20) + skew := int32(30) + minReuse := int32(12) step := skew / 2 ts := time.Now() From d1ee6d40872ef1e6f31fa5abbe8a8b973ac39f98 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Wed, 30 Oct 2024 18:37:23 +0100 Subject: [PATCH 25/28] chore: reuse buffer better --- .../devbox/devutils/binarypatch.go | 43 +++++++++++-------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index 5a80f7378a..16bb756a58 100644 
--- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -34,6 +34,7 @@ const ( // It's working nicely for incremental builds though. type BinaryPatch struct { buf *bytes.Buffer + uintTmp []byte lastOp int lastCount int } @@ -45,7 +46,8 @@ type BinaryPatchThreshold struct { func NewBinaryPatch() *BinaryPatch { return &BinaryPatch{ - buf: bytes.NewBuffer(nil), + buf: bytes.NewBuffer(nil), + uintTmp: make([]byte, 4), } } @@ -74,6 +76,9 @@ func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Du minReuse := int32(12) step := skew / 2 + binary.LittleEndian.PutUint32(p.uintTmp, uint32(len(currentFile))) + p.buf.Write(p.uintTmp) + ts := time.Now() originalMarkers := make([][]int32, math.MaxUint16+1) @@ -184,11 +189,10 @@ func (p *BinaryPatch) Original(index, bytesCount int) { } p.lastOp = BinaryPatchOriginalOp p.buf.WriteByte(BinaryPatchOriginalOp) - num := make([]byte, 4) - binary.LittleEndian.PutUint32(num, uint32(index)) - p.buf.Write(num) - binary.LittleEndian.PutUint32(num, uint32(bytesCount)) - p.buf.Write(num) + binary.LittleEndian.PutUint32(p.uintTmp, uint32(index)) + p.buf.Write(p.uintTmp) + binary.LittleEndian.PutUint32(p.uintTmp, uint32(bytesCount)) + p.buf.Write(p.uintTmp) } func (p *BinaryPatch) Delete(bytesCount int) { @@ -204,9 +208,8 @@ func (p *BinaryPatch) Delete(bytesCount int) { p.lastOp = BinaryPatchDeleteOp p.lastCount = bytesCount p.buf.WriteByte(BinaryPatchDeleteOp) - num := make([]byte, 4) - binary.LittleEndian.PutUint32(num, uint32(bytesCount)) - p.buf.Write(num) + binary.LittleEndian.PutUint32(p.uintTmp, uint32(bytesCount)) + p.buf.Write(p.uintTmp) } func (p *BinaryPatch) Add(bytesArr []byte) { @@ -224,9 +227,8 @@ func (p *BinaryPatch) Add(bytesArr []byte) { p.lastOp = BinaryPatchAddOp p.lastCount = len(bytesArr) p.buf.WriteByte(BinaryPatchAddOp) - num := make([]byte, 4) - binary.LittleEndian.PutUint32(num, uint32(len(bytesArr))) - p.buf.Write(num) + 
binary.LittleEndian.PutUint32(p.uintTmp, uint32(len(bytesArr))) + p.buf.Write(p.uintTmp) p.buf.Write(bytesArr) } @@ -236,21 +238,23 @@ func (p *BinaryPatch) Repeat(count int, b byte) { } p.lastOp = BinaryPatchRepeatOp p.buf.WriteByte(BinaryPatchRepeatOp) - num := make([]byte, 4) - binary.LittleEndian.PutUint32(num, uint32(count)) - p.buf.Write(num) + binary.LittleEndian.PutUint32(p.uintTmp, uint32(count)) + p.buf.Write(p.uintTmp) p.buf.WriteByte(b) } func (p *BinaryPatch) Apply(original []byte) []byte { - result := make([]byte, 0) patch := p.buf.Bytes() - for i := 0; i < len(patch); { + size := binary.LittleEndian.Uint32(patch[0:4]) + result := make([]byte, size) + resultIndex := uint32(0) + for i := 4; i < len(patch); { switch patch[i] { case BinaryPatchOriginalOp: index := binary.LittleEndian.Uint32(patch[i+1 : i+5]) count := binary.LittleEndian.Uint32(patch[i+5 : i+9]) - result = append(result, original[index:index+count]...) + copy(result[resultIndex:], original[index:index+count]) + resultIndex += count i += 9 case BinaryPatchRepeatOp: count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) @@ -262,8 +266,9 @@ func (p *BinaryPatch) Apply(original []byte) []byte { i += 6 case BinaryPatchAddOp: count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) - result = append(result, patch[i+5:i+5+int(count)]...) 
+ copy(result[resultIndex:], patch[i+5:i+5+int(count)]) i += 5 + int(count) + resultIndex += count } } return result From 4a32325de218366937a8512ff02338e2477157d0 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Thu, 31 Oct 2024 14:09:09 +0100 Subject: [PATCH 26/28] fix: corner cases where binary patch was stuck --- .../devbox/devutils/binarypatch.go | 374 ++++++++++++------ 1 file changed, 244 insertions(+), 130 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index 16bb756a58..c01c73ebf6 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -16,16 +16,11 @@ import ( "time" ) +type BinaryPatchOpType = byte + const ( - BinaryPatchAddOp = 1 - BinaryPatchOriginalOp = 2 - BinaryPatchDeleteOp = 3 - BinaryPatchRepeatOp = 4 - - BinaryBatchBlockSize = 100 - BinaryBatchBlockFactor = 8 - BinaryBatchBlockFactorMax = 50 - BinaryPatchMinCommonSize = 8 + BinaryPatchAddOpType BinaryPatchOpType = 1 + BinaryPatchOriginalOpType BinaryPatchOpType = 2 ) // BinaryPatch is helper to avoid sending the whole binaries. 
@@ -35,7 +30,7 @@ const ( type BinaryPatch struct { buf *bytes.Buffer uintTmp []byte - lastOp int + lastOp BinaryPatchOpType lastCount int } @@ -72,177 +67,171 @@ func (p *BinaryPatch) Load(data []byte) { } func (p *BinaryPatch) Read(originalFile, currentFile []byte, maxDuration time.Duration) { - skew := int32(30) - minReuse := int32(12) + skew := uint32(50) + minReuse := uint32(20) + reasonableReuse := uint32(128) step := skew / 2 - binary.LittleEndian.PutUint32(p.uintTmp, uint32(len(currentFile))) - p.buf.Write(p.uintTmp) + ops := &BinaryPatchOpList{} ts := time.Now() - originalMarkers := make([][]int32, math.MaxUint16+1) - originalIterations := int32(len(originalFile)) - skew - for i := skew; i < originalIterations; i++ { + originalMarkers := make([][]uint32, math.MaxUint16+1) + originalIterations := uint32(len(originalFile)) - skew + + for i := skew; i < originalIterations; { if originalFile[i] == 0 { + i++ continue } // Approximate the marker marker := uint16((int(originalFile[i-(skew/4)])+int(originalFile[i-(skew/2)]))/2) | uint16((int(originalFile[i+(skew/4)])+int(originalFile[i+(skew/2)]))/2)<<8 originalMarkers[marker] = append(originalMarkers[marker], i) + i++ } // Delete most popular characters to avoid problems with too many iterations - lenSum := 0 - for i := uint16(0); i < math.MaxUint16; i++ { - lenSum += len(originalMarkers[i]) + sizes := make([]int, len(originalMarkers)) + for i := 0; i < len(originalMarkers); i++ { + sizes[i] = len(originalMarkers[i]) } - lenAvg := lenSum / len(originalMarkers) - for i := uint16(0); i < math.MaxUint16; i++ { - if len(originalMarkers[i]) > lenAvg { - originalMarkers[i] = nil - } + slices.Sort(sizes) + total := 0 + for i := range originalMarkers { + total += len(originalMarkers[i]) } + current := total + clearTopMarkers := func(percentage int) { + percentage = max(100-max(0, percentage), 0) + keep := total * percentage / 100 + i := 0 + for ; i < len(sizes) && keep > 0; i++ { + keep -= sizes[i] + } + if i == 
len(sizes) { + i-- + } + maxMarkersCount := sizes[i] + for j := i + 1; j < len(sizes); j++ { + sizes[j] = 0 + } - // Sort all the markers - for i := uint16(0); i < math.MaxUint16; i++ { - slices.Sort(originalMarkers[i]) + for i := uint16(0); i < math.MaxUint16; i++ { + if len(originalMarkers[i]) > maxMarkersCount { + current -= len(originalMarkers[i]) + originalMarkers[i] = nil + } + } } + clearTopMarkers(10) - ciMax := int32(len(currentFile) - 1) - oiMax := int32(len(originalFile) - 1) + ciMax := uint32(len(currentFile) - 1) + oiMax := uint32(len(originalFile) - 1) - lastOi := int32(0) - lastCi := int32(0) - totalSaved := 0 - iterations := 0 + lastCi := uint32(0) + iterations := uint32(0) + speedUps := 0 - maxIndex := int32(len(currentFile)) - skew + maxIndex := uint32(len(currentFile)) - skew + tt := time.Now().Add(200 * time.Millisecond) loop: - for ci := skew; ci < maxIndex; { - iterations++ + for ci := skew / 2; ci < maxIndex; { if currentFile[ci] == 0 { - ci += step + ci++ continue } + + // Find most unique marker in current step marker := uint16((int(currentFile[ci-(skew/4)])+int(currentFile[ci-(skew/2)]))/2) | uint16((int(currentFile[ci+(skew/4)])+int(currentFile[ci+(skew/2)]))/2)<<8 + bestCi := ci + markerLen := len(originalMarkers[marker]) + for i := uint32(0); i < step && ci+i <= maxIndex; i++ { + if currentFile[ci+i] == 0 { + continue + } + currentMarker := uint16((int(currentFile[ci+i-(skew/4)])+int(currentFile[ci+i-(skew/2)]))/2) | uint16((int(currentFile[ci+i+(skew/4)])+int(currentFile[ci+i+(skew/2)]))/2)<<8 + currentMarkerLen := len(originalMarkers[currentMarker]) + if currentMarkerLen != 0 && currentMarkerLen < markerLen { + marker = currentMarker + markerLen = currentMarkerLen + bestCi = ci + i + } + } + ci = bestCi + + iterations++ if maxDuration != 0 && iterations%1000 == 0 && time.Since(ts) > maxDuration { break } - if iterations%50000 == 0 { - step = step * 7 / 6 - continue + if time.Since(tt) > 30*time.Millisecond { + speedUps++ + if speedUps 
== 2 { + step = skew * 3 / 4 + } + if speedUps == 5 { + step = skew + } + clearTopMarkers(20 + speedUps*5) + tt = time.Now() } - nextCL := 0 - nextCR := 0 - nextOL := 0 + lastOR := uint32(0) + nextCL := uint32(0) + nextCR := uint32(0) + nextOL := uint32(0) for _, oi := range originalMarkers[marker] { - if currentFile[ci] != originalFile[oi] || + if lastOR >= oi || + currentFile[ci] != originalFile[oi] || currentFile[ci+1] != originalFile[oi+1] || - currentFile[ci-1] != originalFile[oi-1] || currentFile[ci-skew/2] != originalFile[oi-skew/2] || currentFile[ci+skew/2] != originalFile[oi+skew/2] { continue } // Validate exact range - l, r := int32(0), int32(0) - for ; ci-l > lastCi && oi-l > lastOi && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { - } + l, r := uint32(0), uint32(0) for ; oi+r < oiMax && ci+r < ciMax && originalFile[oi+r+1] == currentFile[ci+r+1]; r++ { } + for ; ci-l > 0 && oi-l > 0 && originalFile[oi-l-1] == currentFile[ci-l-1]; l++ { + } + lastOR = oi + r // Determine if it's nice - if l+r+1 >= minReuse && nextCR-nextCL < int(r+l) { - nextCL = int(ci - l) - nextCR = int(ci + r) - nextOL = int(oi - l) + if l+r+1 >= minReuse && nextCR-nextCL < r+l { + nextCL = ci - l + nextCR = ci + r + nextOL = oi - l } - if l+r > skew { + if l+r > reasonableReuse { break } } if nextCL != 0 || nextCR != 0 { - totalSaved += (nextCR - nextCL) + 1 - p.Add(currentFile[lastCi:nextCL]) - lastCi = int32(nextCR + 1) - p.Original(nextOL, (nextCR-nextCL)+1) - ci = lastCi + 1 + addLength := int32(nextCL) - int32(lastCi) + if addLength < 0 { + ops.Cut(uint32(-addLength)) + } else { + ops.Add(currentFile[lastCi:nextCL]) + } + lastCi = nextCR + 1 + ops.Original(nextOL, nextCR-nextCL+1) + ci = lastCi + step continue loop } ci += step } - p.Add(currentFile[lastCi:]) + ops.Add(currentFile[lastCi:]) + + p.buf = bytes.NewBuffer(ops.Bytes()) } func (p *BinaryPatch) Len() int { return p.buf.Len() } -func (p *BinaryPatch) Original(index, bytesCount int) { - if bytesCount == 0 { - 
return - } - p.lastOp = BinaryPatchOriginalOp - p.buf.WriteByte(BinaryPatchOriginalOp) - binary.LittleEndian.PutUint32(p.uintTmp, uint32(index)) - p.buf.Write(p.uintTmp) - binary.LittleEndian.PutUint32(p.uintTmp, uint32(bytesCount)) - p.buf.Write(p.uintTmp) -} - -func (p *BinaryPatch) Delete(bytesCount int) { - if bytesCount == 0 { - return - } - if p.lastOp == BinaryPatchDeleteOp { - p.lastCount += bytesCount - b := p.buf.Bytes() - binary.LittleEndian.PutUint32(b[len(b)-4:], uint32(p.lastCount)) - return - } - p.lastOp = BinaryPatchDeleteOp - p.lastCount = bytesCount - p.buf.WriteByte(BinaryPatchDeleteOp) - binary.LittleEndian.PutUint32(p.uintTmp, uint32(bytesCount)) - p.buf.Write(p.uintTmp) -} - -func (p *BinaryPatch) Add(bytesArr []byte) { - if len(bytesArr) == 0 { - return - } - if p.lastOp == BinaryPatchAddOp { - b := p.buf.Bytes() - nextCount := p.lastCount + len(bytesArr) - binary.LittleEndian.PutUint32(b[len(b)-p.lastCount-4:], uint32(nextCount)) - p.buf.Write(bytesArr) - p.lastCount = nextCount - return - } - p.lastOp = BinaryPatchAddOp - p.lastCount = len(bytesArr) - p.buf.WriteByte(BinaryPatchAddOp) - binary.LittleEndian.PutUint32(p.uintTmp, uint32(len(bytesArr))) - p.buf.Write(p.uintTmp) - p.buf.Write(bytesArr) -} - -func (p *BinaryPatch) Repeat(count int, b byte) { - if count == 0 { - return - } - p.lastOp = BinaryPatchRepeatOp - p.buf.WriteByte(BinaryPatchRepeatOp) - binary.LittleEndian.PutUint32(p.uintTmp, uint32(count)) - p.buf.Write(p.uintTmp) - p.buf.WriteByte(b) -} - func (p *BinaryPatch) Apply(original []byte) []byte { patch := p.buf.Bytes() size := binary.LittleEndian.Uint32(patch[0:4]) @@ -250,21 +239,13 @@ func (p *BinaryPatch) Apply(original []byte) []byte { resultIndex := uint32(0) for i := 4; i < len(patch); { switch patch[i] { - case BinaryPatchOriginalOp: + case BinaryPatchOriginalOpType: index := binary.LittleEndian.Uint32(patch[i+1 : i+5]) count := binary.LittleEndian.Uint32(patch[i+5 : i+9]) copy(result[resultIndex:], 
original[index:index+count]) resultIndex += count i += 9 - case BinaryPatchRepeatOp: - count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) - buf := make([]byte, count) - for j := 0; j < int(count); j++ { - buf[j] = patch[i+5] - } - result = append(result, buf...) - i += 6 - case BinaryPatchAddOp: + case BinaryPatchAddOpType: count := binary.LittleEndian.Uint32(patch[i+1 : i+5]) copy(result[resultIndex:], patch[i+5:i+5+int(count)]) i += 5 + int(count) @@ -273,3 +254,136 @@ func (p *BinaryPatch) Apply(original []byte) []byte { } return result } + +type BinaryPatchOp struct { + op BinaryPatchOpType + val1 uint32 + val2 uint32 + content []byte +} + +func (b *BinaryPatchOp) Cut(bytesCount uint32) (nextOp *BinaryPatchOp, left uint32) { + if bytesCount == 0 { + return b, 0 + } + switch b.op { + case BinaryPatchOriginalOpType: + if bytesCount >= b.val2 { + return nil, bytesCount - b.val2 + } + b.val2 -= bytesCount + return b, 0 + case BinaryPatchAddOpType: + size := uint32(len(b.content)) + if bytesCount >= size { + return nil, bytesCount - size + } + b.content = b.content[0 : size-bytesCount] + return b, 0 + } + return nil, bytesCount +} + +func (b *BinaryPatchOp) TargetSize() uint32 { + switch b.op { + case BinaryPatchOriginalOpType: + return b.val2 + case BinaryPatchAddOpType: + return uint32(len(b.content)) + } + return 0 +} + +func (b *BinaryPatchOp) PatchSize() uint32 { + switch b.op { + case BinaryPatchOriginalOpType: + return 9 // byte + uint32 + uint32 + case BinaryPatchAddOpType: + return 5 + uint32(len(b.content)) // byte + uint32 + []byte(content) + } + return 0 +} + +type BinaryPatchOpList struct { + ops []BinaryPatchOp + count int +} + +func (b *BinaryPatchOpList) TargetSize() uint32 { + total := uint32(0) + for i := 0; i < b.count; i++ { + total += b.ops[i].TargetSize() + } + return total +} + +func (b *BinaryPatchOpList) PatchSize() uint32 { + total := uint32(4) // uint32 for file size + for i := 0; i < b.count; i++ { + total += b.ops[i].PatchSize() + } + 
return total +} + +func (b *BinaryPatchOpList) Cut(bytesCount uint32) uint32 { + var next *BinaryPatchOp + for i := b.count - 1; bytesCount > 0 && i >= 0; i-- { + next, bytesCount = b.ops[i].Cut(bytesCount) + if next == nil { + b.count-- + b.ops[i] = BinaryPatchOp{} + } + } + return bytesCount +} + +func (b *BinaryPatchOpList) Bytes() []byte { + targetSize := b.TargetSize() + + // Prepare buffer for the patch + result := make([]byte, b.PatchSize()) + binary.LittleEndian.PutUint32(result, targetSize) + resultIndex := 4 + + // Include all patches + for i := 0; i < b.count; i++ { + switch b.ops[i].op { + case BinaryPatchOriginalOpType: + result[resultIndex] = BinaryPatchOriginalOpType + binary.LittleEndian.PutUint32(result[resultIndex+1:], b.ops[i].val1) + binary.LittleEndian.PutUint32(result[resultIndex+5:], b.ops[i].val2) + resultIndex += 9 + case BinaryPatchAddOpType: + result[resultIndex] = BinaryPatchAddOpType + binary.LittleEndian.PutUint32(result[resultIndex+1:], uint32(len(b.ops[i].content))) + copy(result[resultIndex+5:], b.ops[i].content) + resultIndex += 5 + len(b.ops[i].content) + } + } + return result +} + +func (b *BinaryPatchOpList) Original(index, bytesCount uint32) { + if bytesCount == 0 { + return + } + + b.append(BinaryPatchOp{op: BinaryPatchOriginalOpType, val1: index, val2: bytesCount}) +} + +func (b *BinaryPatchOpList) Add(bytesArr []byte) { + if len(bytesArr) == 0 { + return + } + + b.append(BinaryPatchOp{op: BinaryPatchAddOpType, content: bytesArr}) +} + +func (b *BinaryPatchOpList) append(op BinaryPatchOp) { + // Grow if needed + if len(b.ops) <= b.count { + b.ops = append(b.ops, make([]BinaryPatchOp, 100)...) 
+ } + b.ops[b.count] = op + b.count++ +} From 4084253efbfd7e4bc9a6a72fb109fb6736f1cd4c Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Thu, 31 Oct 2024 14:36:12 +0100 Subject: [PATCH 27/28] fixup lint --- .../devbox/devutils/binarypatch.go | 8 +--- .../devbox/devutils/binarystorage.go | 48 ++++++++++--------- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go index c01c73ebf6..fcbd1ef45b 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarypatch.go @@ -28,10 +28,7 @@ const ( // so the resulting patch may be bigger than it's needed. // It's working nicely for incremental builds though. type BinaryPatch struct { - buf *bytes.Buffer - uintTmp []byte - lastOp BinaryPatchOpType - lastCount int + buf *bytes.Buffer } type BinaryPatchThreshold struct { @@ -41,8 +38,7 @@ type BinaryPatchThreshold struct { func NewBinaryPatch() *BinaryPatch { return &BinaryPatch{ - buf: bytes.NewBuffer(nil), - uintTmp: make([]byte, 4), + buf: bytes.NewBuffer(nil), } } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go b/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go index c7b81c1cb1..a2e4c4941f 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/binarystorage.go @@ -238,33 +238,35 @@ func (r *BinaryStorage) upload(ctx context.Context, name string, binary *Binary) if binary.hash != "" && binary.prevHash != "" && r.Is(name, binary.prevHash) { contents, err := binary.patch() - gzipContents := bytes.NewBuffer(nil) - gz := gzip.NewWriter(gzipContents) - io.Copy(gz, bytes.NewBuffer(contents)) - gz.Flush() - gz.Close() + if err == nil { + gzipContents := bytes.NewBuffer(nil) + gz := gzip.NewWriter(gzipContents) + io.Copy(gz, bytes.NewBuffer(contents)) + gz.Flush() + gz.Close() - gzipContentsLen := 
gzipContents.Len() - req, err := http.NewRequestWithContext(ctx, http.MethodPatch, fmt.Sprintf("http://localhost:%d/%s", r.localPort, name), gzipContents) - if err != nil { - if ctx.Err() != nil { - return 0, err - } - fmt.Printf("error while sending %s patch, fallback to full stream: %s\n", name, err) - } else { - req.ContentLength = int64(gzipContentsLen) - req.Header.Set("Content-Encoding", "gzip") - req.Header.Set("X-Prev-Hash", binary.prevHash) - req.Header.Set("X-Hash", binary.hash) - res, err := client.Do(req) + gzipContentsLen := gzipContents.Len() + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, fmt.Sprintf("http://localhost:%d/%s", r.localPort, name), gzipContents) if err != nil { + if ctx.Err() != nil { + return 0, err + } fmt.Printf("error while sending %s patch, fallback to full stream: %s\n", name, err) - } else if res.StatusCode != http.StatusOK { - b, _ := io.ReadAll(res.Body) - fmt.Printf("error while sending %s patch, fallback to full stream: status code: %s, message: %s\n", name, res.Status, string(b)) } else { - r.SetHash(name, binary.hash) - return gzipContentsLen, nil + req.ContentLength = int64(gzipContentsLen) + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("X-Prev-Hash", binary.prevHash) + req.Header.Set("X-Hash", binary.hash) + res, err := client.Do(req) + if err != nil { + fmt.Printf("error while sending %s patch, fallback to full stream: %s\n", name, err) + } else if res.StatusCode != http.StatusOK { + b, _ := io.ReadAll(res.Body) + fmt.Printf("error while sending %s patch, fallback to full stream: status code: %s, message: %s\n", name, res.Status, string(b)) + } else { + r.SetHash(name, binary.hash) + return gzipContentsLen, nil + } } } } From fff985d1fcb237c053cbd5712c22f71a79623f94 Mon Sep 17 00:00:00 2001 From: Dawid Rusnak Date: Thu, 31 Oct 2024 15:01:54 +0100 Subject: [PATCH 28/28] fix: avoid unnecessary container for agent in devbox --- cmd/tcl/devbox-mutating-webhook/main.go | 3 --- 
.../kubectl-testkube/devbox/devutils/agent.go | 20 +++++-------------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/cmd/tcl/devbox-mutating-webhook/main.go b/cmd/tcl/devbox-mutating-webhook/main.go index d406d3b835..4fa8a51509 100644 --- a/cmd/tcl/devbox-mutating-webhook/main.go +++ b/cmd/tcl/devbox-mutating-webhook/main.go @@ -90,7 +90,6 @@ func main() { set -e /.tktw-bin/wget -O /.tk-devbox/init http://devbox-binary:8080/init || exit 1 chmod 777 /.tk-devbox/init - chmod +x /.tk-devbox/init ls -lah /.tk-devbox` if usesToolkit { script = ` @@ -99,8 +98,6 @@ func main() { /.tktw-bin/wget -O /.tk-devbox/toolkit http://devbox-binary:8080/toolkit || exit 1 chmod 777 /.tk-devbox/init chmod 777 /.tk-devbox/toolkit - chmod +x /.tk-devbox/init - chmod +x /.tk-devbox/toolkit ls -lah /.tk-devbox` } diff --git a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go index 384ace4d3f..c0440f33c6 100644 --- a/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go +++ b/cmd/tcl/kubectl-testkube/devbox/devutils/agent.go @@ -50,26 +50,16 @@ func (r *Agent) Create(ctx context.Context, env *client.Environment) error { {Name: "devbox", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, }, ServiceAccountName: "devbox-account", - InitContainers: []corev1.Container{{ - Name: "devbox-init", - Image: "busybox:1.36.1-musl", - ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{"/bin/sh", "-c"}, - Args: []string{` - /bin/wget -O /.tk-devbox/testkube-api-server http://devbox-binary:8080/testkube-api-server || exit 1 - chmod 777 /.tk-devbox/testkube-api-server - chmod +x /.tk-devbox/testkube-api-server - ls -lah /.tk-devbox`}, - VolumeMounts: []corev1.VolumeMount{ - {Name: "devbox", MountPath: "/.tk-devbox"}, - }, - }}, Containers: []corev1.Container{ { Name: "server", Image: r.agentImage, ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{"/.tk-devbox/testkube-api-server"}, + Command: 
[]string{"/bin/sh", "-c"}, + Args: []string{` + wget -O /.tk-devbox/testkube-api-server http://devbox-binary:8080/testkube-api-server || exit 1 + chmod 777 /.tk-devbox/testkube-api-server + exec /.tk-devbox/testkube-api-server`}, Env: []corev1.EnvVar{ {Name: "NATS_EMBEDDED", Value: "true"}, {Name: "APISERVER_PORT", Value: "8088"},