From 1ec0112ddd04942298bc189514cd8ea4c16ac0dc Mon Sep 17 00:00:00 2001
From: facchettos
Date: Wed, 22 May 2024 14:57:30 +0200
Subject: [PATCH] removed references to the loftctl repo, copied the files
 over from there

---
 cmd/vclusterctl/cmd/platform/access_key.go    |    2 +-
 cmd/vclusterctl/cmd/platform/add/cluster.go   |    8 +-
 .../cmd/platform/connect/cluster.go           |    6 +-
 .../cmd/platform/connect/connect.go           |    4 +-
 cmd/vclusterctl/cmd/platform/get/cluster.go   |    6 +-
 cmd/vclusterctl/cmd/platform/import.go        |    2 +-
 cmd/vclusterctl/cmd/platform/list/clusters.go |    2 +-
 cmd/vclusterctl/cmd/platform/pro.go           |    4 +-
 pkg/clihelper/clihelper.go                    |  774 +++++++++++
 pkg/kube/client.go                            |   54 +
 pkg/kubeconfig/kubeconfig.go                  |  266 ++++
 pkg/loftclient/client.go                      |  628 +++++++++
 pkg/loftclient/config.go                      |   63 +
 pkg/loftclient/helper/helper.go               | 1160 +++++++++++++++++
 pkg/loftclient/naming/naming.go               |   24 +
 pkg/loftconfig/variables.go                   |   21 +
 pkg/loftutils/positional_args.go              |   69 +
 pkg/loftutils/positional_args_test.go         |   55 +
 pkg/loftutils/util.go                         |   26 +
 pkg/platformdefaults/defaults.go              |  112 ++
 20 files changed, 3269 insertions(+), 17 deletions(-)
 create mode 100644 pkg/clihelper/clihelper.go
 create mode 100644 pkg/kube/client.go
 create mode 100644 pkg/kubeconfig/kubeconfig.go
 create mode 100644 pkg/loftclient/client.go
 create mode 100644 pkg/loftclient/config.go
 create mode 100644 pkg/loftclient/helper/helper.go
 create mode 100644 pkg/loftclient/naming/naming.go
 create mode 100644 pkg/loftconfig/variables.go
 create mode 100644 pkg/loftutils/positional_args.go
 create mode 100644 pkg/loftutils/positional_args_test.go
 create mode 100644 pkg/loftutils/util.go
 create mode 100644 pkg/platformdefaults/defaults.go

diff --git a/cmd/vclusterctl/cmd/platform/access_key.go b/cmd/vclusterctl/cmd/platform/access_key.go
index d6946b1905..75a8d2b4b4 100644
--- a/cmd/vclusterctl/cmd/platform/access_key.go
+++ b/cmd/vclusterctl/cmd/platform/access_key.go
@@ -7,9 +7,9 @@ import (
 	"os"
 
 	"github.com/loft-sh/api/v4/pkg/product"
-	"github.com/loft-sh/loftctl/v4/pkg/client"
 	"github.com/loft-sh/log"
 	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	client "github.com/loft-sh/vcluster/pkg/loftclient"
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go
index 3a1bf18583..bf618bc158 100644
--- a/cmd/vclusterctl/cmd/platform/add/cluster.go
+++ b/cmd/vclusterctl/cmd/platform/add/cluster.go
@@ -8,11 +8,11 @@ import (
 	"os/exec"
 	"time"
 
-	"github.com/loft-sh/loftctl/v4/pkg/client"
-	"github.com/loft-sh/loftctl/v4/pkg/client/helper"
-	"github.com/loft-sh/loftctl/v4/pkg/clihelper"
-	"github.com/loft-sh/loftctl/v4/pkg/kube"
 	"github.com/loft-sh/log"
+	"github.com/loft-sh/vcluster/pkg/clihelper"
+	"github.com/loft-sh/vcluster/pkg/kube"
+	client "github.com/loft-sh/vcluster/pkg/loftclient"
+	"github.com/loft-sh/vcluster/pkg/loftclient/helper"
 	"github.com/sirupsen/logrus"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go
index ce4a94a234..8548a4e662 100644
--- a/cmd/vclusterctl/cmd/platform/connect/cluster.go
+++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go
@@ -9,11 +9,11 @@ import (
 
 	managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1"
 	"github.com/loft-sh/api/v4/pkg/product"
-	"github.com/loft-sh/loftctl/v4/pkg/client"
"github.com/loft-sh/loftctl/v4/pkg/client/helper" - "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/kubeconfig" + client "github.com/loft-sh/vcluster/pkg/loftclient" + "github.com/loft-sh/vcluster/pkg/loftclient/helper" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/mgutz/ansi" "github.com/spf13/cobra" diff --git a/cmd/vclusterctl/cmd/platform/connect/connect.go b/cmd/vclusterctl/cmd/platform/connect/connect.go index 45d34fade1..33c621adae 100644 --- a/cmd/vclusterctl/cmd/platform/connect/connect.go +++ b/cmd/vclusterctl/cmd/platform/connect/connect.go @@ -2,13 +2,13 @@ package connect import ( "github.com/loft-sh/api/v4/pkg/product" - pdefaults "github.com/loft-sh/loftctl/v4/pkg/defaults" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platformdefaults" "github.com/spf13/cobra" ) // NewConnectCmd creates a new cobra command -func NewConnectCmd(globalFlags *flags.GlobalFlags, _ *pdefaults.Defaults) *cobra.Command { +func NewConnectCmd(globalFlags *flags.GlobalFlags, _ *platformdefaults.Defaults) *cobra.Command { description := product.ReplaceWithHeader("use", ` Activates a kube context for the given cluster / space / vcluster / management. diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go index 99ab0358ea..85b4344e9b 100644 --- a/cmd/vclusterctl/cmd/platform/get/cluster.go +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -8,10 +8,10 @@ import ( "time" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - "github.com/loft-sh/loftctl/v4/pkg/client" - "github.com/loft-sh/loftctl/v4/pkg/client/naming" - "github.com/loft-sh/loftctl/v4/pkg/config" "github.com/loft-sh/vcluster/pkg/cli/flags" + client "github.com/loft-sh/vcluster/pkg/loftclient" + "github.com/loft-sh/vcluster/pkg/loftclient/naming" + config "github.com/loft-sh/vcluster/pkg/loftconfig" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" diff --git a/cmd/vclusterctl/cmd/platform/import.go b/cmd/vclusterctl/cmd/platform/import.go index 1d2a6d4b89..93436ed192 100644 --- a/cmd/vclusterctl/cmd/platform/import.go +++ b/cmd/vclusterctl/cmd/platform/import.go @@ -3,10 +3,10 @@ package platform import ( "context" - loftctlUtil "github.com/loft-sh/loftctl/v4/pkg/util" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/pkg/cli" "github.com/loft-sh/vcluster/pkg/cli/flags" + loftctlUtil "github.com/loft-sh/vcluster/pkg/loftutils" "github.com/loft-sh/vcluster/pkg/platform" "github.com/spf13/cobra" ) diff --git a/cmd/vclusterctl/cmd/platform/list/clusters.go b/cmd/vclusterctl/cmd/platform/list/clusters.go index ddfe77e627..c942d3bd0c 100644 --- a/cmd/vclusterctl/cmd/platform/list/clusters.go +++ b/cmd/vclusterctl/cmd/platform/list/clusters.go @@ -5,10 +5,10 @@ import ( "time" "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/pkg/client" "github.com/loft-sh/log" "github.com/loft-sh/log/table" "github.com/loft-sh/vcluster/pkg/cli/flags" + client "github.com/loft-sh/vcluster/pkg/loftclient" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cmd/vclusterctl/cmd/platform/pro.go b/cmd/vclusterctl/cmd/platform/pro.go index 3faa3320ec..6510fa2dbe 100644 --- a/cmd/vclusterctl/cmd/platform/pro.go +++ b/cmd/vclusterctl/cmd/platform/pro.go @@ -3,10 +3,10 @@ package platform import ( "fmt" - 
"github.com/loft-sh/loftctl/v4/pkg/defaults" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platformdefaults" "github.com/spf13/cobra" ) @@ -27,7 +27,7 @@ Deprecated, please use vcluster platform instead if err != nil { return nil, fmt.Errorf("failed to create vcluster pro start command: %w", err) } - d, err := defaults.NewFromPath(defaults.ConfigFolder, defaults.ConfigFile) + d, err := platformdefaults.NewFromPath(platformdefaults.ConfigFolder, platformdefaults.ConfigFile) if err != nil { log.Default.Debugf(err.Error()) } diff --git a/pkg/clihelper/clihelper.go b/pkg/clihelper/clihelper.go new file mode 100644 index 0000000000..5351e4235c --- /dev/null +++ b/pkg/clihelper/clihelper.go @@ -0,0 +1,774 @@ +package clihelper + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "sort" + "strconv" + "strings" + "time" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/httputil" + "github.com/sirupsen/logrus" + + jsonpatch "github.com/evanphx/json-patch" + loftclientset "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/portforward" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" +) + +// CriticalStatus container status +var CriticalStatus = map[string]bool{ + "Error": true, + "Unknown": true, + "ImagePullBackOff": true, + "CrashLoopBackOff": true, + "RunContainerError": true, + "ErrImagePull": true, + "CreateContainerConfigError": true, + "InvalidImageName": true, +} + +const defaultReleaseName = "loft" + +const LoftRouterDomainSecret = "loft-router-domain" + +var defaultDeploymentName = "loft" + +func GetDisplayName(name string, displayName string) string { + if displayName != "" { + return displayName + } + + return name +} + +func GetTableDisplayName(name string, displayName string) string { + if displayName != "" && displayName != name { + return displayName + " (" + name + ")" + } + + return name +} + +func DisplayName(entityInfo *clusterv1.EntityInfo) string { + if entityInfo == nil { + return "" + } else if entityInfo.DisplayName != "" { + return entityInfo.DisplayName + } else if entityInfo.Username != "" { + return entityInfo.Username + } + + return entityInfo.Name +} + +func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + return "", err + } else { + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil + } + } + } else { + // find host + for _, rule := range ingress.Spec.Rules { + return 
+			return rule.Host, nil
+		}
+	}
+
+	return "", fmt.Errorf("couldn't find any host in loft ingress '%s/loft-ingress', please make sure you have not changed any deployed resources", namespace)
+}
+
+func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, namespace string, log log.Logger) (*corev1.Pod, error) {
+	// wait until we have a running loft pod
+	now := time.Now()
+	pod := &corev1.Pod{}
+	err := wait.PollUntilContextTimeout(ctx, time.Second*2, config.Timeout(), true, func(ctx context.Context) (bool, error) {
+		pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+			LabelSelector: "app=loft",
+		})
+		if err != nil {
+			log.Warnf("Error trying to retrieve %s pod: %v", product.DisplayName(), err)
+			return false, nil
+		} else if len(pods.Items) == 0 {
+			if time.Now().After(now.Add(time.Second * 10)) {
+				log.Infof("Still waiting for a %s pod...", product.DisplayName())
+				now = time.Now()
+			}
+			return false, nil
+		}
+
+		sort.Slice(pods.Items, func(i, j int) bool {
+			return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time)
+		})
+
+		loftPod := &pods.Items[0]
+		found := false
+		for _, containerStatus := range loftPod.Status.ContainerStatuses {
+			if containerStatus.State.Running != nil && containerStatus.Ready {
+				if containerStatus.Name == "manager" {
+					found = true
+				}
+
+				continue
+			} else if containerStatus.State.Terminated != nil || (containerStatus.State.Waiting != nil && CriticalStatus[containerStatus.State.Waiting.Reason]) {
+				reason := ""
+				message := ""
+				if containerStatus.State.Terminated != nil {
+					reason = containerStatus.State.Terminated.Reason
+					message = containerStatus.State.Terminated.Message
+				} else if containerStatus.State.Waiting != nil {
+					reason = containerStatus.State.Waiting.Reason
+					message = containerStatus.State.Waiting.Message
+				}
+
+				out, err := kubeClient.CoreV1().Pods(namespace).GetLogs(loftPod.Name, &corev1.PodLogOptions{
+					Container: "manager",
+				}).Do(context.Background()).Raw()
+				if err != nil {
+					return false, fmt.Errorf("there seems to be an issue with %s starting up: %s (%s). Please reach out to our support at https://loft.sh/", product.DisplayName(), message, reason)
+				}
+				if strings.Contains(string(out), "register instance: Post \"https://license.loft.sh/register\": dial tcp") {
+					return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up. Looks like you try to install %[1]s into an air-gapped environment, please reach out to our support at https://loft.sh/ for an offline license", product.DisplayName(), string(out))
+				}
+
+				return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up: %[3]s (%[4]s). Please reach out to our support at https://loft.sh/", product.DisplayName(), string(out), message, reason)
+			} else if containerStatus.State.Waiting != nil && time.Now().After(now.Add(time.Second*10)) {
+				if containerStatus.State.Waiting.Message != "" {
+					log.Infof("Please keep waiting, %s container is still starting up: %s (%s)", product.DisplayName(), containerStatus.State.Waiting.Message, containerStatus.State.Waiting.Reason)
+				} else if containerStatus.State.Waiting.Reason != "" {
+					log.Infof("Please keep waiting, %s container is still starting up: %s", product.DisplayName(), containerStatus.State.Waiting.Reason)
+				} else {
+					log.Infof("Please keep waiting, %s container is still starting up...", product.DisplayName())
+				}
+
+				now = time.Now()
+			}
+
+			return false, nil
+		}
+
+		pod = loftPod
+		return found, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return pod, nil
+}
+
+func StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) {
+	log.WriteString(logrus.InfoLevel, "\n")
+	log.Infof("Starting port-forwarding to the %s pod", product.DisplayName())
+	execRequest := client.CoreV1().RESTClient().Post().
+		Resource("pods").
+		Name(pod.Name).
+		Namespace(pod.Namespace).
+		SubResource("portforward")
+
+	t, upgrader, err := spdy.RoundTripperFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: t}, "POST", execRequest.URL())
+	errChan := make(chan error)
+	readyChan := make(chan struct{})
+	stopChan := make(chan struct{})
+	targetPort := getPortForwardingTargetPort(pod)
+	forwarder, err := portforward.New(dialer, []string{localPort + ":" + strconv.Itoa(targetPort)}, stopChan, readyChan, errChan, io.Discard, io.Discard)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		err := forwarder.ForwardPorts(ctx)
+		if err != nil {
+			errChan <- err
+		}
+	}()
+
+	// wait till ready
+	select {
+	case err = <-errChan:
+		return nil, err
+	case <-readyChan:
+	case <-stopChan:
+		return nil, fmt.Errorf("stopped before ready")
+	}
+
+	// start watcher
+	go func() {
+		for {
+			select {
+			case <-stopChan:
+				return
+			case err = <-errChan:
+				log.Infof("error during port forwarder: %v", err)
+				close(stopChan)
+				return
+			}
+		}
+	}()
+
+	return stopChan, nil
+}
+
+func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) {
+	loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			loftNamespace, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespace,
+				},
+			}, metav1.CreateOptions{})
+			if err != nil {
+				return "", err
+			}
+
+			return string(loftNamespace.UID), nil
+		}
+
+		return "", err
+	}
+
+	return string(loftNamespace.UID), nil
+}
+
+type version struct {
+	Version string `json:"version"`
+}
+
+func IsLoftReachable(ctx context.Context, host string) (bool, error) {
+	// wait until loft is reachable at the given url
+	client := &http.Client{
+		Transport: httputil.InsecureTransport(),
+	}
+	url := "https://" + host + "/version"
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+	if err != nil {
+		return false, fmt.Errorf("error creating request with context: %w", err)
+	}
+	resp, err := client.Do(req)
+	if err == nil && resp.StatusCode == http.StatusOK {
+		out, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return false, nil
+		}
+
+		v := &version{}
+		err = json.Unmarshal(out, v)
+		if err != nil {
+			return false, fmt.Errorf("error decoding response from %s: %w. Try running '%s --reset'", url, err, product.StartCmd())
+		} else if v.Version == "" {
+			return false, fmt.Errorf("unexpected response from %s: %s. Try running '%s --reset'", url, string(out), product.StartCmd())
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func IsLocalCluster(host string, log log.Logger) bool {
+	url, err := url.Parse(host)
+	if err != nil {
+		log.Warnf("Couldn't parse kube context host url: %v", err)
+		return false
+	}
+
+	hostname := url.Hostname()
+	ip := net.ParseIP(hostname)
+	if ip != nil {
+		if IsPrivateIP(ip) {
+			return true
+		}
+	}
+
+	if hostname == "localhost" || strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".localhost") {
+		return true
+	}
+
+	return false
+}
+
+var privateIPBlocks []*net.IPNet
+
+func init() {
+	for _, cidr := range []string{
+		"127.0.0.0/8",    // IPv4 loopback
+		"10.0.0.0/8",     // RFC1918
+		"172.16.0.0/12",  // RFC1918
+		"192.168.0.0/16", // RFC1918
+		"::1/128",        // IPv6 loopback
+		"fe80::/10",      // IPv6 link-local
+		"fc00::/7",       // IPv6 unique local addr
+	} {
+		_, block, _ := net.ParseCIDR(cidr)
+		privateIPBlocks = append(privateIPBlocks, block)
+	}
+}
+
+// IsPrivateIP checks if a given ip is private
+func IsPrivateIP(ip net.IP) bool {
+	for _, block := range privateIPBlocks {
+		if block.Contains(ip) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func EnterHostNameQuestion(log log.Logger) (string, error) {
+	return log.Question(&survey.QuestionOptions{
+		Question: fmt.Sprintf("Enter a hostname for your %s instance (e.g. loft.my-domain.tld): \n ", product.DisplayName()),
+		ValidationFunc: func(answer string) error {
+			u, err := url.Parse("https://" + answer)
+			if err != nil || u.Path != "" || u.Port() != "" || len(strings.Split(answer, ".")) < 2 {
+				return fmt.Errorf("please enter a valid hostname without protocol (https://), without path and without port, e.g. loft.my-domain.tld")
loft.my-domain.tld") + } + return nil + }, + }) +} + +func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { + _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, fmt.Errorf("error accessing kubernetes cluster: %w", err) + } + + return true, nil +} + +func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { + log.Infof("Uninstalling %s...", product.DisplayName()) + releaseName := defaultReleaseName + deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } else if deploy != nil && deploy.Labels != nil && deploy.Labels["release"] != "" { + releaseName = deploy.Labels["release"] + } + + args := []string{ + "uninstall", + releaseName, + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + log.Infof("Executing command: helm %s", strings.Join(args, " ")) + output, err := exec.Command("helm", args...).CombinedOutput() + if err != nil { + log.Errorf("error during helm command: %s (%v)", string(output), err) + } + + // we also cleanup the validating webhook configuration and apiservice + apiRegistrationClient, err := clientset.NewForConfig(restConfig) + if err != nil { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.management.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = deleteUser(ctx, restConfig, "admin") + if err != nil { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), "loft-user-secret-admin", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), LoftRouterDomainSecret, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + // we also cleanup the validating webhook configuration and apiservice + err = kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "loft-agent", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.tenancy.kiosk.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.cluster.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-agent-controller", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-applied-defaults", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + log.WriteString(logrus.InfoLevel, "\n") + log.Done(product.Replace("Successfully uninstalled Loft")) + log.WriteString(logrus.InfoLevel, "\n") + + return nil +} + +func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return err + } + + user, err := 
+	if err != nil {
+		return nil
+	} else if len(user.Finalizers) > 0 {
+		user.Finalizers = nil
+		_, err = loftClient.StorageV1().Users().Update(ctx, user, metav1.UpdateOptions{})
+		if err != nil {
+			if kerrors.IsConflict(err) {
+				return deleteUser(ctx, restConfig, name)
+			}
+
+			return err
+		}
+	}
+
+	err = loftClient.StorageV1().Users().Delete(ctx, name, metav1.DeleteOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return err
+	}
+
+	return nil
+}
+
+func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error {
+	// first create an ingress controller
+	const (
+		YesOption = "Yes"
+		NoOption  = "No, I already have an ingress controller installed."
+	)
+
+	answer, err := log.Question(&survey.QuestionOptions{
+		Question:     "Ingress controller required. Should the nginx-ingress controller be installed?",
+		DefaultValue: YesOption,
+		Options: []string{
+			YesOption,
+			NoOption,
+		},
+	})
+	if err != nil {
+		return err
+	}
+
+	if answer == YesOption {
+		args := []string{
+			"install",
+			"ingress-nginx",
+			"ingress-nginx",
+			"--repository-config=''",
+			"--repo",
+			"https://kubernetes.github.io/ingress-nginx",
+			"--kube-context",
+			kubeContext,
+			"--namespace",
+			"ingress-nginx",
+			"--create-namespace",
+			"--set-string",
+			"controller.config.hsts=false",
+			"--wait",
+		}
+		log.WriteString(logrus.InfoLevel, "\n")
+		log.Infof("Executing command: helm %s\n", strings.Join(args, " "))
+		log.Info("Waiting for ingress controller deployment, this can take several minutes...")
+		helmCmd := exec.Command("helm", args...)
+		output, err := helmCmd.CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("error during helm command: %s (%w)", string(output), err)
+		}
+
+		list, err := kubeClient.CoreV1().Secrets("ingress-nginx").List(ctx, metav1.ListOptions{
+			LabelSelector: "name=ingress-nginx,owner=helm,status=deployed",
+		})
+		if err != nil {
+			return err
+		}
+
+		if len(list.Items) == 1 {
+			secret := list.Items[0]
+			originalSecret := secret.DeepCopy()
+			secret.Labels["loft.sh/app"] = "true"
+			if secret.Annotations == nil {
+				secret.Annotations = map[string]string{}
+			}
+
+			secret.Annotations["loft.sh/url"] = "https://kubernetes.github.io/ingress-nginx"
+			originalJSON, err := json.Marshal(originalSecret)
+			if err != nil {
+				return err
+			}
+			modifiedJSON, err := json.Marshal(secret)
+			if err != nil {
+				return err
+			}
+			data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
+			if err != nil {
+				return err
+			}
+			_, err = kubeClient.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, types.MergePatchType, data, metav1.PatchOptions{})
+			if err != nil {
+				return err
+			}
+		}
+
+		log.Done("Successfully installed ingress-nginx to your kubernetes cluster!")
+	}
+
+	return nil
+}
+
+func UpgradeLoft(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, log log.Logger) error {
+	// now we install loft
+	args := []string{
+		"upgrade",
+		defaultReleaseName,
+		chartName,
+		"--install",
+		"--reuse-values",
+		"--create-namespace",
+		"--repository-config=''",
+		"--kube-context",
+		kubeContext,
+		"--namespace",
+		namespace,
+	}
+	if chartRepo != "" {
+		args = append(args, "--repo", chartRepo)
+	}
+	args = append(args, extraArgs...)
+
+	log.WriteString(logrus.InfoLevel, "\n")
+	log.Infof("Executing command: helm %s\n", strings.Join(args, " "))
+	log.Info("Waiting for helm command, this can take up to several minutes...")
+	helmCmd := exec.Command("helm", args...)
+	if chartRepo != "" {
+		helmWorkDir, err := getHelmWorkdir(chartName)
+		if err != nil {
+			return err
+		}
+
+		helmCmd.Dir = helmWorkDir
+	}
+	output, err := helmCmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("error during helm command: %s (%w)", string(output), err)
+	}
+
+	log.Donef("%s has been deployed to your cluster!", product.DisplayName())
+	return nil
+}
+
+func GetLoftManifests(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, _ log.Logger) (string, error) {
+	args := []string{
+		"template",
+		defaultReleaseName,
+		chartName,
+		"--repository-config=''",
+		"--kube-context",
+		kubeContext,
+		"--namespace",
+		namespace,
+	}
+	if chartRepo != "" {
+		args = append(args, "--repo", chartRepo)
+	}
+	args = append(args, extraArgs...)
+
+	helmCmd := exec.Command("helm", args...)
+	if chartRepo != "" {
+		helmWorkDir, err := getHelmWorkdir(chartName)
+		if err != nil {
+			return "", err
+		}
+
+		helmCmd.Dir = helmWorkDir
+	}
+	output, err := helmCmd.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("error during helm command: %s (%w)", string(output), err)
+	}
+	return string(output), nil
+}
+
+// Return the directory where the `helm` commands should be executed or error if none can be found/created
+// Uses current workdir by default unless it contains a folder with the chart name
+func getHelmWorkdir(chartName string) (string, error) {
+	// If chartName folder exists, check temp dir next
+	if _, err := os.Stat(chartName); err == nil {
+		tempDir := os.TempDir()
+
+		// If tempDir/chartName folder exists, create temp folder
+		if _, err := os.Stat(path.Join(tempDir, chartName)); err == nil {
+			tempDir, err = os.MkdirTemp(tempDir, chartName)
+			if err != nil {
+				return "", errors.New("problematic directory `" + chartName + "` found: please execute command in a different folder")
+			}
+		}
+
+		// Use tempDir
+		return tempDir, nil
+	}
+
+	// Use current workdir
+	return "", nil
+}
+
+// Makes sure that admin user and password secret exists
+// Returns (true, nil) if everything is correct but password is different from parameter `password`
+func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) {
+	loftClient, err := loftclientset.NewForConfig(restConfig)
+	if err != nil {
+		return false, err
+	}
+
+	admin, err := loftClient.StorageV1().Users().Get(ctx, "admin", metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return false, err
+	} else if admin == nil {
+		admin, err = loftClient.StorageV1().Users().Create(ctx, &storagev1.User{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "admin",
+			},
+			Spec: storagev1.UserSpec{
+				Username: "admin",
+				Email:    "test@domain.tld",
+				Subject:  "admin",
+				Groups:   []string{"system:masters"},
+				PasswordRef: &storagev1.SecretRef{
+					SecretName:      "loft-user-secret-admin",
+					SecretNamespace: "loft",
+					Key:             "password",
+				},
+			},
+		}, metav1.CreateOptions{})
+		if err != nil {
+			return false, err
+		}
+	} else if admin.Spec.PasswordRef == nil || admin.Spec.PasswordRef.SecretName == "" || admin.Spec.PasswordRef.SecretNamespace == "" {
+		return false, nil
+	}
+
+	key := admin.Spec.PasswordRef.Key
+	if key == "" {
+		key = "password"
+	}
+
+	passwordHash := fmt.Sprintf("%x", sha256.Sum256([]byte(password)))
+
+	secret, err := kubeClient.CoreV1().Secrets(admin.Spec.PasswordRef.SecretNamespace).Get(ctx, admin.Spec.PasswordRef.SecretName, metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return false, err
+	} else if err == nil {
+		existingPasswordHash, keyExists := secret.Data[key]
+		if keyExists {
+			return (string(existingPasswordHash) != passwordHash), nil
+		}
+
+		secret.Data[key] = []byte(passwordHash)
+		_, err = kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{})
+		if err != nil {
+			return false, errors.Wrap(err, "update admin password secret")
+		}
+		return false, nil
+	}
+
+	// create the password secret if it was not found, this can happen if you delete the loft namespace without deleting the admin user
+	secret = &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      admin.Spec.PasswordRef.SecretName,
+			Namespace: admin.Spec.PasswordRef.SecretNamespace,
+		},
+		Data: map[string][]byte{
+			key: []byte(passwordHash),
+		},
+	}
+	_, err = kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{})
+	if err != nil {
+		return false, errors.Wrap(err, "create admin password secret")
+	}
+
+	log.Info("Successfully recreated admin password secret")
+	return false, nil
+}
+
+func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool {
+	_, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		_, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{})
+		return kerrors.IsNotFound(err)
+	}
+
+	return kerrors.IsNotFound(err)
+}
+
+func getPortForwardingTargetPort(pod *corev1.Pod) int {
+	for _, container := range pod.Spec.Containers {
+		if container.Name == "manager" {
+			for _, port := range container.Ports {
+				if port.Name == "https" {
+					return int(port.ContainerPort)
+				}
+			}
+		}
+	}
+
+	return 10443
+}
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
new file mode 100644
index 0000000000..21699183b2
--- /dev/null
+++ b/pkg/kube/client.go
@@ -0,0 +1,54 @@
+package kube
+
+import (
+	agentloftclient "github.com/loft-sh/agentapi/v4/pkg/client/loft/clientset_generated/clientset"
+	loftclient "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset"
+
+	"github.com/pkg/errors"
+
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+type Interface interface {
+	kubernetes.Interface
+	Loft() loftclient.Interface
+	Agent() agentloftclient.Interface
+}
+
+func NewForConfig(c *rest.Config) (Interface, error) {
+	kubeClient, err := kubernetes.NewForConfig(c)
+	if err != nil {
+		return nil, errors.Wrap(err, "create kube client")
+	}
+
+	loftClient, err := loftclient.NewForConfig(c)
+	if err != nil {
+		return nil, errors.Wrap(err, "create loft client")
+	}
+
+	agentLoftClient, err := agentloftclient.NewForConfig(c)
+	if err != nil {
+		return nil, errors.Wrap(err, "create kiosk client")
+	}
+
+	return &client{
+		Interface:       kubeClient,
+		loftClient:      loftClient,
+		agentLoftClient: agentLoftClient,
+	}, nil
+}
+
+type client struct {
+	kubernetes.Interface
+	loftClient      loftclient.Interface
+	agentLoftClient agentloftclient.Interface
+}
+
+func (c *client) Loft() loftclient.Interface {
+	return c.loftClient
+}
+
+func (c *client) Agent() agentloftclient.Interface {
+	return c.agentLoftClient
+}
diff --git a/pkg/kubeconfig/kubeconfig.go b/pkg/kubeconfig/kubeconfig.go
new file mode 100644
index 0000000000..602ecf63e0
--- /dev/null
+++ b/pkg/kubeconfig/kubeconfig.go
@@ -0,0 +1,266 @@
+package kubeconfig
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+	"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +type ContextOptions struct { + Name string + Server string + CaData []byte + ConfigPath string + InsecureSkipTLSVerify bool + DirectClusterEndpointEnabled bool + VirtualClusterAccessPointEnabled bool + + Token string + ClientKeyData []byte + ClientCertificateData []byte + + CurrentNamespace string + SetActive bool +} + +func SpaceInstanceContextName(projectName, spaceInstanceName string) string { + return "loft_" + spaceInstanceName + "_" + projectName +} + +func VirtualClusterInstanceContextName(projectName, virtualClusterInstance string) string { + return "loft-vcluster_" + virtualClusterInstance + "_" + projectName +} + +func virtualClusterInstanceProjectAndNameFromContextName(contextName string) (string, string) { + return strings.Split(contextName, "_")[2], strings.Split(contextName, "_")[1] +} + +func SpaceContextName(clusterName, namespaceName string) string { + contextName := "loft_" + if namespaceName != "" { + contextName += namespaceName + "_" + } + + contextName += clusterName + return contextName +} + +func VirtualClusterContextName(clusterName, namespaceName, virtualClusterName string) string { + return "loft-vcluster_" + virtualClusterName + "_" + namespaceName + "_" + clusterName +} + +func ManagementContextName() string { + return "loft-management" +} + +func ParseContext(contextName string) (isLoftContext bool, cluster string, namespace string, vCluster string) { + splitted := strings.Split(contextName, "_") + if len(splitted) == 0 || (splitted[0] != "loft" && splitted[0] != "loft-vcluster") { + return false, "", "", "" + } + + // cluster or space context + if splitted[0] == "loft" { + if len(splitted) > 3 || len(splitted) == 1 { + return false, "", "", "" + } else if len(splitted) == 2 { + return true, splitted[1], "", "" + } + + return true, splitted[2], splitted[1], "" + } + + // vCluster context + if len(splitted) != 4 { + return false, "", "", "" + } + + return true, splitted[3], splitted[2], splitted[1] +} + +func CurrentContext() (string, error) { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return "", err + } + + return config.CurrentContext, nil +} + +// DeleteContext deletes the context with the given name from the kube config +func DeleteContext(contextName string) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + delete(config.Contexts, contextName) + delete(config.Clusters, contextName) + delete(config.AuthInfos, contextName) + + if config.CurrentContext == contextName { + config.CurrentContext = "" + for name := range config.Contexts { + config.CurrentContext = name + break + } + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func updateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, setActive bool) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = 
+	context.AuthInfo = contextName
+	context.Namespace = namespaceName
+
+	config.Contexts[contextName] = context
+	if setActive {
+		config.CurrentContext = contextName
+	}
+
+	// Save the config
+	return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false)
+}
+
+func printKubeConfigTo(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, writer io.Writer) error {
+	config := api.NewConfig()
+
+	config.Clusters[contextName] = cluster
+	config.AuthInfos[contextName] = authInfo
+
+	// Update kube context
+	context := api.NewContext()
+	context.Cluster = contextName
+	context.AuthInfo = contextName
+	context.Namespace = namespaceName
+
+	config.Contexts[contextName] = context
+	config.CurrentContext = contextName
+
+	// set kind & version
+	config.APIVersion = "v1"
+	config.Kind = "Config"
+
+	out, err := clientcmd.Write(*config)
+	if err != nil {
+		return err
+	}
+
+	_, err = writer.Write(out)
+	return err
+}
+
+// UpdateKubeConfig updates the kube config and adds the virtual cluster context
+func UpdateKubeConfig(options ContextOptions) error {
+	contextName, cluster, authInfo, err := createContext(options)
+	if err != nil {
+		return err
+	}
+
+	// we don't want to set the space name here as the default namespace in the virtual cluster, because it couldn't exist
+	return updateKubeConfig(contextName, cluster, authInfo, options.CurrentNamespace, options.SetActive)
+}
+
+// PrintKubeConfigTo prints the given config to the writer
+func PrintKubeConfigTo(options ContextOptions, writer io.Writer) error {
+	contextName, cluster, authInfo, err := createContext(options)
+	if err != nil {
+		return err
+	}
+
+	// we don't want to set the space name here as the default namespace in the virtual cluster, because it couldn't exist
+	return printKubeConfigTo(contextName, cluster, authInfo, options.CurrentNamespace, writer)
+}
+
+// PrintTokenKubeConfig writes the kube config to the os.Stdout
+func PrintTokenKubeConfig(restConfig *rest.Config, token string) error {
+	contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+	return printKubeConfigTo(contextName, cluster, authInfo, "", os.Stdout)
+}
+
+// WriteTokenKubeConfig writes the kube config to the io.Writer
+func WriteTokenKubeConfig(restConfig *rest.Config, token string, w io.Writer) error {
+	contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+	return printKubeConfigTo(contextName, cluster, authInfo, "", w)
+}
+
+func createTokenContext(restConfig *rest.Config, token string) (string, *api.Cluster, *api.AuthInfo) {
+	contextName := "default"
+
+	cluster := api.NewCluster()
+	cluster.Server = restConfig.Host
+	cluster.InsecureSkipTLSVerify = restConfig.Insecure
+	cluster.CertificateAuthority = restConfig.CAFile
+	cluster.CertificateAuthorityData = restConfig.CAData
+	cluster.TLSServerName = restConfig.ServerName
+
+	authInfo := api.NewAuthInfo()
+	authInfo.Token = token
+
+	return contextName, cluster, authInfo
+}
+
+func createContext(options ContextOptions) (string, *api.Cluster, *api.AuthInfo, error) {
+	contextName := options.Name
+	cluster := api.NewCluster()
+	cluster.Server = options.Server
+	cluster.CertificateAuthorityData = options.CaData
+	cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+
+	authInfo := api.NewAuthInfo()
+	if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil {
+		authInfo.Token = options.Token
+		authInfo.ClientKeyData = options.ClientKeyData
+		authInfo.ClientCertificateData = options.ClientCertificateData
+	} else {
+		command, err := os.Executable()
+		if err != nil {
+			return "", nil, nil, err
+		}
+
+		absConfigPath, err := filepath.Abs(options.ConfigPath)
+		if err != nil {
+			return "", nil, nil, err
+		}
+
+		if options.VirtualClusterAccessPointEnabled {
+			projectName, virtualClusterName := virtualClusterInstanceProjectAndNameFromContextName(contextName)
+			authInfo.Exec = &api.ExecConfig{
+				APIVersion: v1beta1.SchemeGroupVersion.String(),
+				Command:    command,
+				Args:       []string{"token", "--silent", "--project", projectName, "--virtual-cluster", virtualClusterName},
+			}
+		} else {
+			authInfo.Exec = &api.ExecConfig{
+				APIVersion: v1beta1.SchemeGroupVersion.String(),
+				Command:    command,
+				Args:       []string{"token", "--silent", "--config", absConfigPath},
+			}
+			if options.DirectClusterEndpointEnabled {
+				authInfo.Exec.Args = append(authInfo.Exec.Args, "--direct-cluster-endpoint")
+			}
+		}
+	}
+
+	return contextName, cluster, authInfo, nil
+}
diff --git a/pkg/loftclient/client.go b/pkg/loftclient/client.go
new file mode 100644
index 0000000000..e52ad20c03
--- /dev/null
+++ b/pkg/loftclient/client.go
@@ -0,0 +1,628 @@
+package client
+
+import (
+	"context"
+	"crypto/x509"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/blang/semver"
+	"github.com/loft-sh/loftctl/v4/pkg/client/naming"
+	"github.com/loft-sh/loftctl/v4/pkg/kubeconfig"
+
+	"github.com/loft-sh/api/v4/pkg/auth"
+	"github.com/loft-sh/api/v4/pkg/product"
+
+	managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1"
+	storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+
+	"github.com/loft-sh/loftctl/v4/pkg/constants"
+	"github.com/loft-sh/loftctl/v4/pkg/kube"
+	"github.com/loft-sh/loftctl/v4/pkg/upgrade"
+	"github.com/loft-sh/log"
+	"github.com/mitchellh/go-homedir"
+	perrors "github.com/pkg/errors"
+	"github.com/skratchdot/open-golang/open"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var CacheFolder = ".loft"
+
+// DefaultCacheConfig is the path to the config
+var DefaultCacheConfig = "config.json"
+
+const (
+	VersionPath   = "%s/version"
+	LoginPath     = "%s/login?cli=true"
+	RedirectPath  = "%s/spaces"
+	AccessKeyPath = "%s/profile/access-keys"
+	RefreshToken  = time.Minute * 30
+)
+
+func init() {
+	hd, _ := homedir.Dir()
+	if folder, ok := os.LookupEnv(constants.LoftCacheFolderEnv); ok {
+		CacheFolder = filepath.Join(hd, folder)
+	} else {
+		CacheFolder = filepath.Join(hd, CacheFolder)
+	}
+	DefaultCacheConfig = filepath.Join(CacheFolder, DefaultCacheConfig)
+}
+
+type Client interface {
+	Management() (kube.Interface, error)
+	ManagementConfig() (*rest.Config, error)
+
+	SpaceInstance(project, name string) (kube.Interface, error)
+	SpaceInstanceConfig(project, name string) (*rest.Config, error)
+
+	VirtualClusterInstance(project, name string) (kube.Interface, error)
+	VirtualClusterInstanceConfig(project, name string) (*rest.Config, error)
+
+	Cluster(cluster string) (kube.Interface, error)
+	ClusterConfig(cluster string) (*rest.Config, error)
+
+	VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error)
+	VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error)
+
+	Login(host string, insecure bool, log log.Logger) error
+	LoginWithAccessKey(host, accessKey string, insecure bool) error
+	LoginRaw(host, accessKey string, insecure bool) error
+
+	Logout(ctx context.Context) error
+
+	Version() (*auth.Version, error)
+	Config() *Config
+	DirectClusterEndpointToken(forceRefresh bool) (string, error)
+	VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error)
+	Save() error
+}
+
+func NewClient() Client {
+	return &client{
+		config: &Config{},
+	}
+}
+
+func NewClientFromPath(path string) (Client, error) {
+	c := &client{
+		configPath: path,
+	}
+
+	err := c.initConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+type client struct {
+	config     *Config
+	configPath string
+	configOnce sync.Once
+}
+
+// Logout implements Client.
+func (c *client) Logout(ctx context.Context) error {
+	managementClient, err := c.Management()
+	if err != nil {
+		return fmt.Errorf("create management client: %w", err)
+	}
+
+	self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("get self: %w", err)
+	}
+
+	if self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin {
+		err = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(ctx, self.Status.AccessKey, metav1.DeleteOptions{})
+		if err != nil {
+			return fmt.Errorf("delete access key: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (c *client) initConfig() error {
+	var retErr error
+	c.configOnce.Do(func() {
+		// load the config or create new one if not found
+		content, err := os.ReadFile(c.configPath)
+		if err != nil {
+			if os.IsNotExist(err) {
+				c.config = NewConfig()
+				return
+			}
+
+			retErr = err
+			return
+		}
+
+		config := &Config{
+			VirtualClusterAccessPointCertificates: make(map[string]VirtualClusterCertificatesEntry),
+		}
+		err = json.Unmarshal(content, config)
+		if err != nil {
+			retErr = err
+			return
+		}
+
+		c.config = config
+	})
+
+	return retErr
+}
+
+func (c *client) VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error) {
+	if c.config == nil {
+		return "", "", perrors.New("no config loaded")
+	}
+
+	contextName := kubeconfig.VirtualClusterInstanceContextName(project, virtualCluster)
+
+	// see if we have stored cert data for this vci
+	now := metav1.Now()
+	cachedVirtualClusterAccessPointCertificate, ok := c.config.VirtualClusterAccessPointCertificates[contextName]
+	if !forceRefresh && ok && cachedVirtualClusterAccessPointCertificate.LastRequested.Add(RefreshToken).After(now.Time) && cachedVirtualClusterAccessPointCertificate.ExpirationTime.After(now.Time) {
+		return cachedVirtualClusterAccessPointCertificate.CertificateData, cachedVirtualClusterAccessPointCertificate.KeyData, nil
+	}
+
+	// refresh token
+	managementClient, err := c.Management()
+	if err != nil {
+		return "", "", err
+	}
+
+	kubeConfigResponse, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(project)).GetKubeConfig(
+		context.Background(),
+		virtualCluster,
+		&managementv1.VirtualClusterInstanceKubeConfig{
+			Spec: managementv1.VirtualClusterInstanceKubeConfigSpec{
+				CertificateTTL: ptr.To[int32](86_400),
+			},
+		},
+		metav1.CreateOptions{},
+	)
+	if err != nil {
+		return "", "", perrors.Wrap(err, "fetch certificate data")
+	}
+
+	certificateData, keyData, err := getCertificateAndKeyDataFromKubeConfig(kubeConfigResponse.Status.KubeConfig)
+	if err != nil {
+		return "", "", err
+	}
+
+	if c.config.VirtualClusterAccessPointCertificates == nil {
+		c.config.VirtualClusterAccessPointCertificates = make(map[string]VirtualClusterCertificatesEntry)
+	}
+	c.config.VirtualClusterAccessPointCertificates[contextName] = VirtualClusterCertificatesEntry{
+		CertificateData: certificateData,
+		KeyData:         keyData,
+		LastRequested:   now,
+		ExpirationTime:  now.Add(86_400 * time.Second),
+	}
+
+	err = c.Save()
+	if err != nil {
+		return "", "", perrors.Wrap(err, "save config")
+	}
+
+	return certificateData, keyData, nil
+}
+
+func getCertificateAndKeyDataFromKubeConfig(config string) (string, string, error) {
+	clientCfg, err := clientcmd.NewClientConfigFromBytes([]byte(config))
+	if err != nil {
+		return "", "", err
+	}
+
+	apiCfg, err := clientCfg.RawConfig()
+	if err != nil {
+		return "", "", err
+	}
+
+	return string(apiCfg.AuthInfos["vcluster"].ClientCertificateData), string(apiCfg.AuthInfos["vcluster"].ClientKeyData), nil
+}
+
+func (c *client) DirectClusterEndpointToken(forceRefresh bool) (string, error) {
+	if c.config == nil {
+		return "", perrors.New("no config loaded")
+	}
+
+	// check if we can use existing token
+	now := metav1.Now()
+	if !forceRefresh && c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(RefreshToken).After(now.Time) {
+		return c.config.DirectClusterEndpointToken, nil
+	}
+
+	// refresh token
+	managementClient, err := c.Management()
+	if err != nil {
+		return "", err
+	}
+
+	clusterGatewayToken, err := managementClient.Loft().ManagementV1().DirectClusterEndpointTokens().Create(context.Background(), &managementv1.DirectClusterEndpointToken{}, metav1.CreateOptions{})
+	if err != nil {
+		if c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(time.Hour*24).After(now.Time) {
+			return c.config.DirectClusterEndpointToken, nil
+		}
+
+		return "", err
+	} else if clusterGatewayToken.Status.Token == "" {
+		return "", perrors.New("retrieved an empty token")
+	}
+
+	c.config.DirectClusterEndpointToken = clusterGatewayToken.Status.Token
+	c.config.DirectClusterEndpointTokenRequested = &now
+	err = c.Save()
+	if err != nil {
+		return "", perrors.Wrap(err, "save config")
+	}
+
+	return c.config.DirectClusterEndpointToken, nil
+}
+
+func (c *client) Save() error {
+	if c.configPath == "" {
+		return nil
+	}
+	if c.config == nil {
+		return perrors.New("no config to write")
+	}
+	if c.config.Kind == "" {
+		c.config.Kind = "Config"
+	}
+	if c.config.APIVersion == "" {
+		c.config.APIVersion = "storage.loft.sh/v1"
+	}
+
+	err := os.MkdirAll(filepath.Dir(c.configPath), 0o755)
+	if err != nil {
+		return err
+	}
+
+	out, err := json.Marshal(c.config)
+	if err != nil {
+		return err
+	}
+
+	return os.WriteFile(c.configPath, out, 0o660)
+}
+
+func (c *client) ManagementConfig() (*rest.Config, error) {
+	return c.restConfig("/kubernetes/management")
+}
+
+func (c *client) Management() (kube.Interface, error) {
+	restConfig, err := c.ManagementConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	return kube.NewForConfig(restConfig)
+}
+
+func (c *client) SpaceInstanceConfig(project, name string) (*rest.Config, error) {
+	return c.restConfig("/kubernetes/project/" + project + "/space/" + name)
+}
+
+func (c *client) SpaceInstance(project, name string) (kube.Interface, error) {
+	restConfig, err := c.SpaceInstanceConfig(project, name)
+	if err != nil {
+		return nil, err
+	}
+
+	return kube.NewForConfig(restConfig)
+}
+func (c *client) VirtualClusterInstanceConfig(project, name string) (*rest.Config, error) {
+	return c.restConfig("/kubernetes/project/" + project + "/virtualcluster/" + name)
+}
+
+func (c *client) VirtualClusterInstance(project, name string) (kube.Interface, error) {
+	restConfig, err := c.VirtualClusterInstanceConfig(project, name)
+	if err != nil {
+		return nil, err
+	}
+
+	return kube.NewForConfig(restConfig)
+}
+
+func (c *client) ClusterConfig(cluster string) (*rest.Config, error) {
+	return c.restConfig("/kubernetes/cluster/" + cluster)
+}
+
+func (c *client) Cluster(cluster string) (kube.Interface, error) {
+	restConfig, err := c.ClusterConfig(cluster)
+	if err != nil {
+		return nil, err
+	}
+
+	return kube.NewForConfig(restConfig)
+}
+
+func (c *client) VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error) {
+	return c.restConfig("/kubernetes/virtualcluster/" + cluster + "/" + namespace + "/" + virtualCluster)
+}
+
+func (c *client) VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error) {
+	restConfig, err := c.VirtualClusterConfig(cluster, namespace, virtualCluster)
+	if err != nil {
+		return nil, err
+	}
+
+	return kube.NewForConfig(restConfig)
+}
+
+func (c *client) Config() *Config {
+	return c.config
+}
+
+type keyStruct struct {
+	Key string
+}
+
+func verifyHost(host string) error {
+	if !strings.HasPrefix(host, "https") {
+		return fmt.Errorf("cannot log into a non https loft instance '%s', please make sure you have TLS enabled", host)
+	}
+
+	return nil
+}
+
+func (c *client) Version() (*auth.Version, error) {
+	restConfig, err := c.restConfig("")
+	if err != nil {
+		return nil, err
+	}
+
+	restClient, err := kube.NewForConfig(restConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	raw, err := restClient.CoreV1().RESTClient().Get().RequestURI("/version").DoRaw(context.Background())
+	if err != nil {
+		return nil, perrors.New(fmt.Sprintf("%s\n\nYou may need to login again via `%s login %s --insecure` to allow self-signed certificates\n", err.Error(), os.Args[0], restConfig.Host))
+	}
+
+	version := &auth.Version{}
+	err = json.Unmarshal(raw, version)
+	if err != nil {
+		return nil, perrors.Wrap(err, "parse version response")
+	}
+
+	return version, nil
+}
+
+func (c *client) Login(host string, insecure bool, log log.Logger) error {
+	var (
+		loginUrl   = fmt.Sprintf(LoginPath, host)
+		key        keyStruct
+		keyChannel = make(chan keyStruct)
+	)
+
+	err := verifyHost(host)
+	if err != nil {
+		return err
+	}
+
+	server := startServer(fmt.Sprintf(RedirectPath, host), keyChannel, log)
+	err = open.Run(fmt.Sprintf(LoginPath, host))
+	if err != nil {
+		return fmt.Errorf("couldn't open the login page in a browser: %w. Please use the --access-key flag for the login command. You can generate an access key here: %s", err, fmt.Sprintf(AccessKeyPath, host))
+	} else {
+		log.Infof("If the browser does not open automatically, please navigate to %s", loginUrl)
+		msg := "If you have problems logging in, please navigate to %s/profile/access-keys, click on 'Create Access Key' and then login via '%s %s --access-key ACCESS_KEY"
+		if insecure {
+			msg += " --insecure"
+		}
+		msg += "'"
+		log.Infof(msg, host, product.LoginCmd(), host)
+		log.Infof("Logging into %s...", product.DisplayName())
+
+		key = <-keyChannel
+	}
+
+	go func() {
+		err = server.Shutdown(context.Background())
+		if err != nil {
+			log.Debugf("Error shutting down server: %v", err)
+		}
+	}()
+
+	close(keyChannel)
+	return c.LoginWithAccessKey(host, key.Key, insecure)
+}
+
+func (c *client) LoginRaw(host, accessKey string, insecure bool) error {
+	if c.config.Host == host && c.config.AccessKey == accessKey {
+		return nil
+	}
+
+	c.config.Host = host
+	c.config.Insecure = insecure
+	c.config.AccessKey = accessKey
+	c.config.DirectClusterEndpointToken = ""
+	c.config.DirectClusterEndpointTokenRequested = nil
+	return c.Save()
+}
+
+func (c *client) LoginWithAccessKey(host, accessKey string, insecure bool) error {
+	err := verifyHost(host)
+	if err != nil {
+		return err
+	}
+	if c.config.Host == host && c.config.AccessKey == accessKey {
+		return nil
+	}
+
+	// delete old access key if were logged in before
+	if c.config.AccessKey != "" {
+		managementClient, err := c.Management()
+		if err == nil {
+			self, err := managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{})
+			if err == nil && self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin {
+				_ = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(context.TODO(), self.Status.AccessKey, metav1.DeleteOptions{})
+			}
+		}
+	}
+
+	c.config.Host = host
+	c.config.Insecure = insecure
+	c.config.AccessKey = accessKey
+	c.config.DirectClusterEndpointToken = ""
+	c.config.DirectClusterEndpointTokenRequested = nil
+
+	// verify version
+	err = VerifyVersion(c)
+	if err != nil {
+		return err
+	}
+
+	// verify the connection works
+	managementClient, err := c.Management()
+	if err != nil {
+		return perrors.Wrap(err, "create management client")
+	}
+
+	// try to get self
+	_, err = managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{})
+	if err != nil {
+		var urlError *url.Error
+		if errors.As(err, &urlError) {
+			var err x509.UnknownAuthorityError
+			if errors.As(urlError.Err, &err) {
+				return fmt.Errorf("unsafe login endpoint '%s', if you wish to login into an insecure loft endpoint run with the '--insecure' flag", c.config.Host)
+			}
+		}
+
+		return perrors.Errorf("error logging in: %v", err)
+	}
+
+	return c.Save()
+}
+
+// VerifyVersion checks if the Loft version is compatible with this CLI version
+func VerifyVersion(baseClient Client) error {
+	v, err := baseClient.Version()
+	if err != nil {
+		return err
+	} else if v.Version == "v0.0.0" {
+		return nil
+	}
+
+	backendMajor, err := strconv.Atoi(v.Major)
+	if err != nil {
+		return perrors.Wrap(err, "parse major version string")
+	}
+
+	cliVersionStr := upgrade.GetVersion()
+	if cliVersionStr == "" {
+		return nil
+	}
+
+	cliVersion, err := semver.Parse(cliVersionStr)
+	if err != nil {
+		return err
+	}
+
+	if int(cliVersion.Major) > backendMajor {
+		return fmt.Errorf("unsupported %[1]s version %[2]s. Please downgrade your CLI to below v%[3]d.0.0 to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, cliVersion.Major, backendMajor)
Please downgrade your CLI to below v%[3]d.0.0 to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, cliVersion.Major, backendMajor) + } else if int(cliVersion.Major) < backendMajor { + return fmt.Errorf("unsupported %[1]s version %[2]s. Please upgrade your CLI to v%[3]d.0.0 or above to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, backendMajor, cliVersion.Major) + } + + return nil +} + +func (c *client) restConfig(hostSuffix string) (*rest.Config, error) { + if c.config == nil { + return nil, perrors.New("no config loaded") + } else if c.config.Host == "" || c.config.AccessKey == "" { + return nil, perrors.New(fmt.Sprintf("not logged in, please make sure you have run '%s [%s]'", product.LoginCmd(), product.Url())) + } + + // build a rest config + config, err := GetRestConfig(c.config.Host+hostSuffix, c.config.AccessKey, c.config.Insecure) + if err != nil { + return nil, err + } + + return config, err +} + +func GetKubeConfig(host, token, namespace string, insecure bool) clientcmd.ClientConfig { + contextName := "local" + kubeConfig := clientcmdapi.NewConfig() + kubeConfig.Contexts = map[string]*clientcmdapi.Context{ + contextName: { + Cluster: contextName, + AuthInfo: contextName, + Namespace: namespace, + }, + } + kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ + contextName: { + Server: host, + InsecureSkipTLSVerify: insecure, + }, + } + kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ + contextName: { + Token: token, + }, + } + kubeConfig.CurrentContext = contextName + return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}) +} + +func GetRestConfig(host, token string, insecure bool) (*rest.Config, error) { + config, err := GetKubeConfig(host, token, "", insecure).ClientConfig() + if err != nil { + return nil, err + } + config.UserAgent = constants.LoftctlUserAgentPrefix + upgrade.GetVersion() + + return config, nil +} + +func startServer(redirectURI string, keyChannel chan keyStruct, log log.Logger) *http.Server { + srv := &http.Server{Addr: ":25843"} + + http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) { + keys, ok := r.URL.Query()["key"] + if !ok || len(keys[0]) == 0 { + log.Warn("Login: the key used to login is not valid") + return + } + + keyChannel <- keyStruct{ + Key: keys[0], + } + http.Redirect(w, r, redirectURI, http.StatusSeeOther) + }) + + go func() { + // cannot panic, because this probably is an intentional close + _ = srv.ListenAndServe() + }() + + // returning reference so caller can call Shutdown() + return srv +} diff --git a/pkg/loftclient/config.go b/pkg/loftclient/config.go new file mode 100644 index 0000000000..0ebc14f37e --- /dev/null +++ b/pkg/loftclient/config.go @@ -0,0 +1,63 @@ +package client + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Config defines the client config structure +type Config struct { + metav1.TypeMeta `json:",inline"` + + // host is the http endpoint of how to access loft + // +optional + Host string `json:"host,omitempty"` + + // LastInstallContext is the last install context + // +optional + LastInstallContext string `json:"lastInstallContext,omitempty"` + + // insecure specifies if the loft instance is insecure + // +optional + Insecure bool `json:"insecure,omitempty"` + + // access key is the access key for the given loft host + // +optional + AccessKey string 
`json:"accesskey,omitempty"` + + // virtual cluster access key is the access key for the given loft host to create virtual clusters + // +optional + VirtualClusterAccessKey string `json:"virtualClusterAccessKey,omitempty"` + + // DEPRECATED: do not use anymore + // the direct cluster endpoint token + // +optional + DirectClusterEndpointToken string `json:"directClusterEndpointToken,omitempty"` + + // DEPRECATED: do not use anymore + // last time the direct cluster endpoint token was requested + // +optional + DirectClusterEndpointTokenRequested *metav1.Time `json:"directClusterEndpointTokenRequested,omitempty"` + + // map of cached certificates for "access point" mode virtual clusters + // +optional + VirtualClusterAccessPointCertificates map[string]VirtualClusterCertificatesEntry +} + +type VirtualClusterCertificatesEntry struct { + CertificateData string + KeyData string + LastRequested metav1.Time + ExpirationTime time.Time +} + +// NewConfig creates a new config +func NewConfig() *Config { + return &Config{ + TypeMeta: metav1.TypeMeta{ + Kind: "Config", + APIVersion: "storage.loft.sh/v1", + }, + } +} diff --git a/pkg/loftclient/helper/helper.go b/pkg/loftclient/helper/helper.go new file mode 100644 index 0000000000..3fc70b39b0 --- /dev/null +++ b/pkg/loftclient/helper/helper.go @@ -0,0 +1,1160 @@ +package helper + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + + "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" + "github.com/loft-sh/loftctl/v4/pkg/client/naming" + authorizationv1 "k8s.io/api/authorization/v1" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + "github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/loftctl/v4/pkg/kube" + "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/mgutz/ansi" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/util/term" +) + +var errNoClusterAccess = errors.New("the user has no access to any cluster") + +type VirtualClusterInstanceProject struct { + VirtualCluster *managementv1.VirtualClusterInstance + Project *managementv1.Project +} + +type SpaceInstanceProject struct { + SpaceInstance *managementv1.SpaceInstance + Project *managementv1.Project +} + +func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultVirtualClusterTemplate != "" { + templateName = projectTemplates.DefaultVirtualClusterTemplate + } + + // try to find template + if templateName != "" { + for _, virtualClusterTemplate := range projectTemplates.VirtualClusterTemplates { + if virtualClusterTemplate.Name == templateName { + return &virtualClusterTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed 
virtual cluster templates in project %s", projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 1 { + return &projectTemplates.VirtualClusterTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.VirtualClusterTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.VirtualClusterTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultSpaceTemplate != "" { + templateName = projectTemplates.DefaultSpaceTemplate + } + + // try to find template + if templateName != "" { + for _, spaceTemplate := range projectTemplates.SpaceTemplates { + if spaceTemplate.Name == templateName { + return &spaceTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.SpaceTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed space templates in project %s", projectName) + } else if len(projectTemplates.SpaceTemplates) == 1 { + return &projectTemplates.SpaceTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.SpaceTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.SpaceTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { + if clusterName != "" || spaceName != "" { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", "", err + } + + // gather projects and virtual cluster instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", "", fmt.Errorf("couldn't find 
or access project %s", projectName) + } + + return "", "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var virtualClusters []*VirtualClusterInstanceProject + for _, p := range projects { + if virtualClusterName != "" { + virtualClusterInstance, err := getProjectVirtualClusterInstance(ctx, managementClient, p, virtualClusterName) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, virtualClusterInstance) + } else { + projectVirtualClusters, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, projectVirtualClusters...) + } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, virtualCluster := range virtualClusters { + optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) + } + + // check if there are virtualclusters + if len(virtualClusters) == 0 { + if virtualClusterName != "" { + return "", "", "", "", fmt.Errorf("couldn't find or access virtual cluster %s", virtualClusterName) + } + return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") + } else if len(virtualClusters) == 1 { + return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil + } + } + + return "", "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { + if clusterName != "" { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", err + } + + // gather projects and space instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, 
metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var spaces []*SpaceInstanceProject + for _, p := range projects { + if spaceName != "" { + spaceInstance, err := getProjectSpaceInstance(ctx, managementClient, p, spaceName) + if err != nil { + continue + } + + spaces = append(spaces, spaceInstance) + } else { + projectSpaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + continue + } + + spaces = append(spaces, projectSpaceInstances...) + } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, space := range spaces { + optionsUnformatted = append(optionsUnformatted, []string{"Space: " + clihelper.GetDisplayName(space.SpaceInstance.Name, space.SpaceInstance.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(space.Project.Name, space.Project.Spec.DisplayName)}) + } + + // check if there are spaces + if len(spaces) == 0 { + if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find or access space %s", spaceName) + } + return "", "", "", fmt.Errorf("couldn't find a space you have access to") + } else if len(spaces) == 1 { + return spaces[0].SpaceInstance.Spec.ClusterRef.Cluster, spaces[0].Project.Name, spaces[0].SpaceInstance.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return spaces[idx].SpaceInstance.Spec.ClusterRef.Cluster, spaces[idx].Project.Name, spaces[idx].SpaceInstance.Name, nil + } + } + + return "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { + if projectName != "" { + return clusterName, projectName, nil + } else if allowClusterOnly && clusterName != "" { + return clusterName, "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", "", err + } + + projectNames := []string{} + for _, project := range projectList.Items { + projectNames = append(projectNames, clihelper.GetDisplayName(project.Name, project.Spec.DisplayName)) + } + + if len(projectNames) == 0 { + cluster, err := SelectCluster(ctx, baseClient, log) + if err != nil { + if errors.Is(err, errNoClusterAccess) { + return "", "", fmt.Errorf("the user has no access to a project") + } + + return "", "", err + } + + return cluster, "", nil + } + + var selectedProject *managementv1.Project + if len(projectNames) == 1 { + selectedProject = &projectList.Items[0] + } else { + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a project to use", + DefaultValue: projectNames[0], + Options: projectNames, + }) + if err != nil { + return "", "", err + } + for idx, project := range projectList.Items { + if answer == 
clihelper.GetDisplayName(project.Name, project.Spec.DisplayName) { + selectedProject = &projectList.Items[idx] + } + } + if selectedProject == nil { + return "", "", fmt.Errorf("answer not found") + } + } + + if clusterName == "" { + clusterName, err = SelectProjectCluster(ctx, baseClient, selectedProject, log) + return clusterName, selectedProject.Name, err + } + + return clusterName, selectedProject.Name, nil +} + +// SelectCluster lets the user select a cluster +func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger) (string, error) { + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", err + } + + clusterNames := []string{} + for _, cluster := range clusterList.Items { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Items) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Items) == 1 { + return clusterList.Items[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + for _, cluster := range clusterList.Items { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectProjectCluster lets the user select a cluster from the project's allowed clusters +func SelectProjectCluster(ctx context.Context, baseClient client.Client, project *managementv1.Project, log log.Logger) (string, error) { + if !term.IsTerminal(os.Stdin) { + // Allow loft to schedule as before + return "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, project.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + anyClusterOption := "Any Cluster [Loft Selects Cluster]" + clusterNames := []string{} + for _, allowedCluster := range project.Spec.AllowedClusters { + if allowedCluster.Name == "*" { + clusterNames = append(clusterNames, anyClusterOption) + break + } + } + + for _, cluster := range clusterList.Clusters { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Clusters) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Clusters) == 1 { + return clusterList.Clusters[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + + if answer == anyClusterOption { + return "", nil + } + + for _, cluster := range clusterList.Clusters { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectUserOrTeam lets the user select an user or team in a cluster +func SelectUserOrTeam(ctx context.Context, baseClient client.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, nil, err + } + + clusterAccess, 
err := managementClient.Loft().ManagementV1().Clusters().ListAccess(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + + var user *clusterv1.EntityInfo + if len(clusterAccess.Users) > 0 { + user = &clusterAccess.Users[0].Info + } + + teams := []*clusterv1.EntityInfo{} + for _, team := range clusterAccess.Teams { + t := team + teams = append(teams, &t.Info) + } + + if user == nil && len(teams) == 0 { + return nil, nil, fmt.Errorf("the user has no access to cluster %s", clusterName) + } else if user != nil && len(teams) == 0 { + return user, nil, nil + } else if user == nil && len(teams) == 1 { + return nil, teams[0], nil + } + + names := []string{} + if user != nil { + names = append(names, "User "+clihelper.DisplayName(user)) + } + for _, t := range teams { + names = append(names, "Team "+clihelper.DisplayName(t)) + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team to use", + DefaultValue: names[0], + Options: names, + }) + if err != nil { + return nil, nil, err + } + + if user != nil && "User "+clihelper.DisplayName(user) == answer { + return user, nil, nil + } + for _, t := range teams { + if "Team "+clihelper.DisplayName(t) == answer { + return nil, t, nil + } + } + + return nil, nil, fmt.Errorf("answer not found") +} + +type ClusterUserOrTeam struct { + Team bool + ClusterMember managementv1.ClusterMember +} + +func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { + if userName != "" && teamName != "" { + return nil, fmt.Errorf("team and user specified, please only choose one") + } + + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + members, err := managementClient.Loft().ManagementV1().Clusters().ListMembers(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("retrieve cluster members: %w", err) + } + + matchedMembers := []ClusterUserOrTeam{} + optionsUnformatted := [][]string{} + for _, user := range members.Users { + if teamName != "" { + continue + } else if userName != "" && user.Info.Name != userName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + ClusterMember: user, + }) + displayName := user.Info.DisplayName + if displayName == "" { + displayName = user.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"User: " + displayName, "Kube User: " + user.Info.Name}) + } + for _, team := range members.Teams { + if userName != "" { + continue + } else if teamName != "" && team.Info.Name != teamName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + Team: true, + ClusterMember: team, + }) + displayName := team.Info.DisplayName + if displayName == "" { + displayName = team.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"Team: " + displayName, "Kube Team: " + team.Info.Name}) + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + if len(questionOptions) == 0 { + if userName == "" && teamName == "" { + return nil, fmt.Errorf("couldn't find any space") + } else if userName != "" { + return nil, fmt.Errorf("couldn't find user %s in cluster %s", ansi.Color(userName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return nil, fmt.Errorf("couldn't find team %s in cluster %s", ansi.Color(teamName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if len(questionOptions) == 1 { + 
return &matchedMembers[0], nil + } + + selectedMember, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return nil, err + } + + for idx, s := range questionOptions { + if s == selectedMember { + return &matchedMembers[idx], nil + } + } + + return nil, fmt.Errorf("selected question option not found") +} + +func GetVirtualClusterInstances(ctx context.Context, baseClient client.Client) ([]*VirtualClusterInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retVClusters []*VirtualClusterInstanceProject + for _, project := range projectList.Items { + p := &project + + virtualClusterInstances, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retVClusters = append(retVClusters, virtualClusterInstances...) + } + + return retVClusters, nil +} + +func CanAccessProjectSecret(ctx context.Context, managementClient kube.Interface, namespace, name string) (bool, error) { + return CanAccessInstance(ctx, managementClient, namespace, name, "projectsecrets") +} + +func CanAccessInstance(ctx context.Context, managementClient kube.Interface, namespace, name string, resource string) (bool, error) { + selfSubjectAccessReview, err := managementClient.Loft().ManagementV1().SelfSubjectAccessReviews().Create(ctx, &managementv1.SelfSubjectAccessReview{ + Spec: managementv1.SelfSubjectAccessReviewSpec{ + SelfSubjectAccessReviewSpec: authorizationv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Verb: "use", + Group: managementv1.SchemeGroupVersion.Group, + Version: managementv1.SchemeGroupVersion.Version, + Resource: resource, + Namespace: namespace, + Name: name, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } else if !selfSubjectAccessReview.Status.Allowed || selfSubjectAccessReview.Status.Denied { + return false, nil + } + return true, nil +} + +func GetSpaceInstances(ctx context.Context, baseClient client.Client) ([]*SpaceInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retSpaces []*SpaceInstanceProject + for _, project := range projectList.Items { + p := &project + + spaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retSpaces = append(retSpaces, spaceInstances...) 
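+ // note: p above points at the for-range loop variable; this relies on Go 1.22's
+ // per-iteration loop variables, on older Go versions every entry would alias the last project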
+ } + + return retSpaces, nil +} + +type ProjectProjectSecret struct { + ProjectSecret managementv1.ProjectSecret + Project string +} + +func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { + var projects []*managementv1.Project + if len(projectNames) == 0 { + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for idx := range projectList.Items { + projectItem := projectList.Items[idx] + projects = append(projects, &projectItem) + } + } else { + for _, projectName := range projectNames { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + projects = append(projects, project) + } + } + + var retSecrets []*ProjectProjectSecret + for _, project := range projects { + projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(naming.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, projectSecret := range projectSecrets.Items { + canAccess, err := CanAccessProjectSecret(ctx, managementClient, projectSecret.Namespace, projectSecret.Name) + if err != nil { + return nil, err + } else if !canAccess { + continue + } + + retSecrets = append(retSecrets, &ProjectProjectSecret{ + ProjectSecret: projectSecret, + Project: project.Name, + }) + } + } + + return retSecrets, nil +} + +type ClusterSpace struct { + clusterv1.Space + Cluster string +} + +// GetSpaces returns all spaces accessible by the user or team +func GetSpaces(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterSpace, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + spaceList := []ClusterSpace{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + spaces, err := clusterClient.Agent().ClusterV1().Spaces().List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving spaces from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, space := range spaces.Items { + spaceList = append(spaceList, ClusterSpace{ + Space: space, + Cluster: cluster.Name, + }) + } + } + sort.Slice(spaceList, func(i, j int) bool { + return spaceList[i].Name < spaceList[j].Name + }) + + return spaceList, nil +} + +type ClusterVirtualCluster struct { + clusterv1.VirtualCluster + Cluster string +} + +// GetVirtualClusters returns all virtual clusters the user has access to +func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterVirtualCluster, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + virtualClusterList := []ClusterVirtualCluster{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + virtualClusters, err := 
clusterClient.Agent().ClusterV1().VirtualClusters("").List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving virtual clusters from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, virtualCluster := range virtualClusters.Items { + virtualClusterList = append(virtualClusterList, ClusterVirtualCluster{ + VirtualCluster: virtualCluster, + Cluster: cluster.Name, + }) + } + } + sort.Slice(virtualClusterList, func(i, j int) bool { + return virtualClusterList[i].Name < virtualClusterList[j].Name + }) + + return virtualClusterList, nil +} + +// SelectSpaceAndClusterName selects a space and cluster name +func SelectSpaceAndClusterName(ctx context.Context, baseClient client.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { + spaces, err := GetSpaces(ctx, baseClient, log) + if err != nil { + return "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedSpaces := []ClusterSpace{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, space := range spaces { + if spaceName != "" && space.Space.Name != spaceName { + continue + } else if clusterName != "" && space.Cluster != clusterName { + continue + } else if len(matchedSpaces) > 20 { + break + } + + if isLoftContext && vCluster == "" && cluster == space.Cluster && namespace == space.Space.Name { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedSpaces = append(matchedSpaces, space) + spaceName := space.Space.Name + if space.Space.Annotations != nil && space.Space.Annotations["loft.sh/display-name"] != "" { + spaceName = space.Space.Annotations["loft.sh/display-name"] + " (" + spaceName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{spaceName, space.Cluster}) + } + + questionOptions := formatOptions("Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if spaceName == "" { + return "", "", fmt.Errorf("couldn't find any space") + } else if clusterName != "" { + return "", "", fmt.Errorf("couldn't find space %s in cluster %s", ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", fmt.Errorf("couldn't find space %s", ansi.Color(spaceName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedSpaces[0].Space.Name, matchedSpaces[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedSpaces[idx].Cluster + spaceName = matchedSpaces[idx].Space.Name + break + } + } + + return spaceName, clusterName, nil +} + +func GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*managementv1.UserInfo, *clusterv1.EntityInfo, error) { + self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("get self: %w", err) + } else if self.Status.User == nil && self.Status.Team == nil { + return nil, nil, fmt.Errorf("no user or team name returned") + } + + 
return self.Status.User, self.Status.Team, nil +} + +func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { + virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) + if err != nil { + return "", "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedVClusters := []ClusterVirtualCluster{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, virtualCluster := range virtualClusters { + if virtualClusterName != "" && virtualCluster.VirtualCluster.Name != virtualClusterName { + continue + } else if spaceName != "" && virtualCluster.VirtualCluster.Namespace != spaceName { + continue + } else if clusterName != "" && virtualCluster.Cluster != clusterName { + continue + } + + if isLoftContext && vCluster == virtualCluster.VirtualCluster.Name && cluster == virtualCluster.Cluster && namespace == virtualCluster.VirtualCluster.Namespace { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedVClusters = append(matchedVClusters, virtualCluster) + vClusterName := virtualCluster.VirtualCluster.Name + if virtualCluster.VirtualCluster.Annotations != nil && virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] != "" { + vClusterName = virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] + " (" + vClusterName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{vClusterName, virtualCluster.VirtualCluster.Namespace, virtualCluster.Cluster}) + } + + questionOptions := formatOptions("vCluster: %s | Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if virtualClusterName == "" { + return "", "", "", fmt.Errorf("couldn't find any virtual cluster") + } else if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if clusterName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", "", fmt.Errorf("couldn't find virtual cluster %s", ansi.Color(virtualClusterName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedVClusters[0].VirtualCluster.Name, matchedVClusters[0].VirtualCluster.Namespace, matchedVClusters[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster to use", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedVClusters[idx].Cluster + virtualClusterName = matchedVClusters[idx].VirtualCluster.Name + spaceName = matchedVClusters[idx].VirtualCluster.Namespace + break + } + } + + return virtualClusterName, spaceName, clusterName, nil +} + +func formatOptions(format string, options [][]string) []string { + if len(options) == 0 { + return []string{} + } + + columnLengths := make([]int, len(options[0])) + for _, row := range 
options { + for i, column := range row { + if len(column) > columnLengths[i] { + columnLengths[i] = len(column) + } + } + } + + retOptions := []string{} + for _, row := range options { + columns := []interface{}{} + for i := range row { + value := row[i] + if columnLengths[i] > len(value) { + value = value + strings.Repeat(" ", columnLengths[i]-len(value)) + } + + columns = append(columns, value) + } + + retOptions = append(retOptions, fmt.Sprintf(format, columns...)) + } + + return retOptions +} + +func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, spaceName string) (*SpaceInstanceProject, error) { + spaceInstance := &managementv1.SpaceInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(spaceName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstance) + if err != nil { + return nil, err + } + + if !spaceInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &SpaceInstanceProject{ + SpaceInstance: spaceInstance, + Project: project, + }, nil +} + +func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*SpaceInstanceProject, error) { + spaceInstanceList := &managementv1.SpaceInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstanceList) + if err != nil { + return nil, err + } + + var spaces []*SpaceInstanceProject + for _, spaceInstance := range spaceInstanceList.Items { + if !spaceInstance.Status.CanUse { + continue + } + + s := spaceInstance + spaces = append(spaces, &SpaceInstanceProject{ + SpaceInstance: &s, + Project: project, + }) + } + return spaces, nil +} + +func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, virtualClusterName string) (*VirtualClusterInstanceProject, error) { + virtualClusterInstance := &managementv1.VirtualClusterInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(virtualClusterName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(virtualClusterInstance) + if err != nil { + return nil, err + } + + if !virtualClusterInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &VirtualClusterInstanceProject{ + VirtualCluster: virtualClusterInstance, + Project: project, + }, nil +} + +func getProjectVirtualClusterInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*VirtualClusterInstanceProject, error) { + virtualClusterInstanceList := &managementv1.VirtualClusterInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). 
+ Into(virtualClusterInstanceList)
+ if err != nil {
+ return nil, err
+ }
+
+ var virtualClusters []*VirtualClusterInstanceProject
+ for _, virtualClusterInstance := range virtualClusterInstanceList.Items {
+ if !virtualClusterInstance.Status.CanUse {
+ continue
+ }
+
+ v := virtualClusterInstance
+ virtualClusters = append(virtualClusters, &VirtualClusterInstanceProject{
+ VirtualCluster: &v,
+ Project: project,
+ })
+ }
+ return virtualClusters, nil
+}
diff --git a/pkg/loftclient/naming/naming.go b/pkg/loftclient/naming/naming.go
new file mode 100644
index 0000000000..e2952bfe2c
--- /dev/null
+++ b/pkg/loftclient/naming/naming.go
@@ -0,0 +1,24 @@
+package naming
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "strings"
+)
+
+// ProjectNamespace returns the namespace that backs the given project
+func ProjectNamespace(projectName string) string {
+ return "loft-p-" + projectName
+}
+
+// SafeConcatName joins the given names with "-" and caps the result at 63 characters
+func SafeConcatName(name ...string) string {
+ return SafeConcatNameMax(name, 63)
+}
+
+// SafeConcatNameMax joins the given names with "-"; if the result exceeds max characters,
+// it is truncated and suffixed with a short sha256 digest to keep the name unique
+func SafeConcatNameMax(name []string, max int) string {
+ fullPath := strings.Join(name, "-")
+ if len(fullPath) > max {
+ digest := sha256.Sum256([]byte(fullPath))
+ return fullPath[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7]
+ }
+ return fullPath
+}
diff --git a/pkg/loftconfig/variables.go b/pkg/loftconfig/variables.go
new file mode 100644
index 0000000000..de91b7bb77
--- /dev/null
+++ b/pkg/loftconfig/variables.go
@@ -0,0 +1,21 @@
+package config
+
+import (
+ "os"
+ "time"
+)
+
+const (
+ defaultTimeout = 10 * time.Minute
+ timeoutEnvVariable = "LOFT_TIMEOUT"
+)
+
+// Timeout returns the request timeout, which can be overridden via the LOFT_TIMEOUT environment variable
+func Timeout() time.Duration {
+ if timeout := os.Getenv(timeoutEnvVariable); timeout != "" {
+ if parsedTimeout, err := time.ParseDuration(timeout); err == nil {
+ return parsedTimeout
+ }
+ }
+
+ return defaultTimeout
+}
diff --git a/pkg/loftutils/positional_args.go b/pkg/loftutils/positional_args.go
new file mode 100644
index 0000000000..08f9f23fa1
--- /dev/null
+++ b/pkg/loftutils/positional_args.go
@@ -0,0 +1,69 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+)
+
+var (
+ SpaceNameOnlyUseLine string
+ SpaceNameOnlyValidator cobra.PositionalArgs
+
+ VClusterNameOnlyUseLine string
+ VClusterNameOnlyValidator cobra.PositionalArgs
+)
+
+func init() {
+ SpaceNameOnlyUseLine, SpaceNameOnlyValidator = NamedPositionalArgsValidator(true, true, "SPACE_NAME")
+ VClusterNameOnlyUseLine, VClusterNameOnlyValidator = NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME")
+}
+
+// NamedPositionalArgsValidator returns a cobra.PositionalArgs that returns a helpful
+// error message if the arg number doesn't match.
+// It also returns a string that can be appended to the cobra use line.
+//
+// Example output for extra arguments:
+//
+// $ command arg asdf
+// [fatal] command ARG_1 [flags]
+// Invalid Args: received 2 arguments, expected 1, extra arguments: "asdf"
+// Run with --help for more details
+//
+// Example output for missing arguments:
+//
+// $ command
+// [fatal] command ARG_1 [flags]
+// Invalid Args: received 0 arguments, expected 1, please specify missing: "ARG_1"
+// Run with --help for more details on arguments
+func NamedPositionalArgsValidator(failMissing, failExtra bool, expectedArgs ...string) (string, cobra.PositionalArgs) {
+ return " " + strings.Join(expectedArgs, " "), func(cmd *cobra.Command, args []string) error {
+ numExpectedArgs := len(expectedArgs)
+ numArgs := len(args)
+ numMissing := numExpectedArgs - numArgs
+
+ if numMissing == 0 {
+ return nil
+ }
+
+ // didn't receive as many arguments as expected
+ if numMissing > 0 && failMissing {
+ // the last numMissing expectedArgs
+ missingKeys := strings.Join(expectedArgs[len(expectedArgs)-(numMissing):], ", ")
+ return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, please specify missing: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, missingKeys)
+ }
+
+ // received more arguments than expected
+ if numMissing < 0 && failExtra {
+ numExtra := -numMissing
+ // the last numExtra args
+ extraValues := strings.Join(args[len(args)-numExtra:], ", ")
+ return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, extra arguments: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, extraValues)
+ }
+
+ return nil
+ }
+}
diff --git a/pkg/loftutils/positional_args_test.go b/pkg/loftutils/positional_args_test.go
new file mode 100644
index 0000000000..df08227aad
--- /dev/null
+++ b/pkg/loftutils/positional_args_test.go
@@ -0,0 +1,55 @@
+package util
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/spf13/cobra"
+ "gotest.tools/v3/assert"
+)
+
+func TestNamedPositionalArgsValidator(t *testing.T) {
+ // loop through a generated variety of inputs: arg counts, expected arg counts, failMissing and failExtra
+ // since the cases depend on the counts, it's easier to loop than to write out a test table
+ maxExpectedArgCount := 5
+ maxActualArgsCount := maxExpectedArgCount + 5
+ expectedArgs := []string{}
+ testNum := 0
+ // loop through maxExpectedArgCount lengths of expectedArgs
+ for len(expectedArgs) <= maxExpectedArgCount {
+ actualArgs := []string{}
+ // loop through maxActualArgsCount lengths of actualArgs
+ for len(actualArgs) <= maxActualArgsCount {
+ defer func() {
+ panicErr := recover()
+ if panicErr != nil {
+ t.Fatalf("this function should never panic: %+v", panicErr)
+ }
+ }()
+ testNum += 1
+ // loop through both values of failMissing and failExtra
+ for _, failMissing := range []bool{true, false} {
+ for _, failExtra := range []bool{true, false} {
+ // execute test
+ t.Logf("running test #%d with failMissing %v, failExtra %v, expectedArgs: %q, args: %q", testNum, failMissing, failExtra, expectedArgs, actualArgs)
+ // if testNum == 23 {
+ // t.Log("focus a test number for debugging")
+ // }
+ _, validator := NamedPositionalArgsValidator(failMissing, failExtra, expectedArgs...)
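+ // the use-line suffix returned as the first value is discarded here; only the validator is exercised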
+ err := validator(&cobra.Command{}, actualArgs)
+ if len(actualArgs) > len(expectedArgs) && failExtra {
+ assert.ErrorContains(t, err, "extra arguments:", "expect error to not be nil as arg count is mismatched")
+ } else if len(actualArgs) < len(expectedArgs) && failMissing {
+ assert.ErrorContains(t, err, "please specify missing:", "expect error to not be nil as arg count is mismatched")
+ } else {
+ assert.NilError(t, err, "expect error to be nil as all args provided and no extra")
+ }
+ // append to actual args
+ actualArgs = append(actualArgs, fmt.Sprintf("ARG_%d", len(actualArgs)))
+ }
+ }
+ }
+ // append to expected args
+ expectedArgs = append(expectedArgs, fmt.Sprintf("ARG_NAME_%d", len(expectedArgs)))
+ }
+}
diff --git a/pkg/loftutils/util.go b/pkg/loftutils/util.go
new file mode 100644
index 0000000000..bda6399221
--- /dev/null
+++ b/pkg/loftutils/util.go
@@ -0,0 +1,26 @@
+package util
+
+import (
+ "errors"
+
+ kerrors "k8s.io/apimachinery/pkg/api/errors"
+)
+
+// GetCause returns the first cause message of a Kubernetes status error,
+// falling back to the plain error string
+func GetCause(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ var statusErr *kerrors.StatusError
+
+ if errors.As(err, &statusErr) {
+ details := statusErr.Status().Details
+ if details != nil && len(details.Causes) > 0 {
+ return details.Causes[0].Message
+ }
+
+ return statusErr.Error()
+ }
+
+ return err.Error()
+}
diff --git a/pkg/platformdefaults/defaults.go b/pkg/platformdefaults/defaults.go
new file mode 100644
index 0000000000..1332b3372a
--- /dev/null
+++ b/pkg/platformdefaults/defaults.go
@@ -0,0 +1,112 @@
+package platformdefaults
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+
+ client "github.com/loft-sh/vcluster/pkg/loftclient"
+ "github.com/pkg/errors"
+)
+
+const (
+ KeyProject = "project"
+)
+
+var (
+ ConfigFile = "defaults.json"
+ ConfigFolder = client.CacheFolder
+
+ DefaultKeys = []string{KeyProject}
+)
+
+// Defaults holds the default values
+type Defaults struct {
+ folderPath string
+ fileName string
+ fullPath string
+
+ values map[string]string
+}
+
+// NewFromPath creates a new defaults instance from the given path
+func NewFromPath(folderPath string, fileName string) (*Defaults, error) {
+ fullPath := filepath.Join(folderPath, fileName)
+ defaults := &Defaults{folderPath, fileName, fullPath, make(map[string]string)}
+
+ if err := defaults.ensureConfigFile(); err != nil {
+ return defaults, errors.Wrap(err, "no config file")
+ }
+
+ contents, err := os.ReadFile(fullPath)
+ if err != nil {
+ return defaults, errors.Wrap(err, "read config file")
+ }
+ if len(contents) == 0 {
+ return defaults, nil
+ }
+ if err = json.Unmarshal(contents, &defaults.values); err != nil {
+ return defaults, errors.Wrap(err, "invalid json")
+ }
+
+ return defaults, nil
+}
+
+// Set sets the given key to the given value and persists the defaults on disk
+func (d *Defaults) Set(key string, value string) error {
+ if !IsSupportedKey(key) {
+ return errors.Errorf("key %s is not supported", key)
+ }
+
+ d.values[key] = value
+ data, err := json.Marshal(d.values)
+ if err != nil {
+ return errors.Wrap(err, "invalid json")
+ }
+ if err = os.WriteFile(d.fullPath, data, os.ModePerm); err != nil {
+ return errors.Wrap(err, "write config file")
+ }
+
+ return nil
+}
+
+// Get returns the value for the given key; the fallback is only returned for unsupported keys
+func (d *Defaults) Get(key string, fallback string) (string, error) {
+ if !IsSupportedKey(key) {
+ return fallback, errors.Errorf("key %s is not supported", key)
+ }
+
+ return d.values[key], nil
+}
+
+// IsSupportedKey returns true if the given key is supported
+func IsSupportedKey(key string) bool {
+ for _, k := range DefaultKeys {
+ if k == key {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (d *Defaults) ensureConfigFile() error {
+ _, err := os.Stat(d.fullPath)
+ // file exists
+ if err == nil {
+ return nil
+ }
+
+ if os.IsNotExist(err) {
+ if err := os.MkdirAll(d.folderPath, os.ModePerm); err != nil {
+ return errors.Wrap(err, "create cache folder")
+ }
+ if _, err := os.Create(d.fullPath); err != nil {
+ return errors.Wrap(err, "create defaults file")
+ }
+
+ return nil
+ }
+
+ return err
+}
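+
+// Usage sketch (illustrative only, based on the helpers defined above;
+// "my-project" is a hypothetical project name):
+//
+//  defaults, err := NewFromPath(ConfigFolder, ConfigFile)
+//  if err != nil {
+//      return err
+//  }
+//  if err := defaults.Set(KeyProject, "my-project"); err != nil {
+//      return err
+//  }
+//  project, _ := defaults.Get(KeyProject, "")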