From 43642b2fe9824198ace8ebfc351b16b63e85fc83 Mon Sep 17 00:00:00 2001 From: facchettos Date: Thu, 16 May 2024 14:01:01 +0200 Subject: [PATCH 01/17] added cluster list --- cmd/vclusterctl/cmd/platform/access_key.go | 4 +- cmd/vclusterctl/cmd/platform/add/add.go | 22 + cmd/vclusterctl/cmd/platform/add/cluster.go | 306 +++++ .../cmd/platform/connect/cluster.go | 337 ++--- .../cmd/platform/connect/connect.go | 27 +- cmd/vclusterctl/cmd/platform/get/cluster.go | 182 +++ cmd/vclusterctl/cmd/platform/get/get.go | 22 + cmd/vclusterctl/cmd/platform/import.go | 2 +- cmd/vclusterctl/cmd/platform/list/clusters.go | 81 ++ cmd/vclusterctl/cmd/platform/list/list.go | 23 + cmd/vclusterctl/cmd/platform/platform.go | 16 +- cmd/vclusterctl/cmd/platform/pro.go | 18 +- cmd/vclusterctl/cmd/platform/reset.go | 8 +- cmd/vclusterctl/cmd/platform/start.go | 6 +- go.mod | 2 +- pkg/cli/reset/password.go | 180 +++ pkg/cli/reset/reset.go | 21 + pkg/cli/start/docker.go | 420 ++++++ pkg/cli/start/login.go | 121 ++ pkg/cli/start/port_forwarding.go | 68 + pkg/cli/start/start.go | 305 +++++ pkg/cli/start/success.go | 247 ++++ pkg/cli/start/upgrade.go | 88 ++ pkg/platform/clihelper/clihelper.go | 773 +++++++++++ pkg/platform/defaults/defaults.go | 111 ++ pkg/platform/kube/client.go | 54 + pkg/platform/kubeconfig/kubeconfig.go | 266 ++++ pkg/platform/loftclient/client.go | 627 +++++++++ pkg/platform/loftclient/config.go | 63 + pkg/platform/loftclient/helper/helper.go | 1160 +++++++++++++++++ pkg/platform/loftclient/naming/naming.go | 24 + pkg/platform/loftconfig/variables.go | 21 + pkg/platform/loftutils/positional_args.go | 69 + .../loftutils/positional_args_test.go | 55 + pkg/platform/loftutils/util.go | 26 + 35 files changed, 5509 insertions(+), 246 deletions(-) create mode 100644 cmd/vclusterctl/cmd/platform/add/add.go create mode 100644 cmd/vclusterctl/cmd/platform/add/cluster.go create mode 100644 cmd/vclusterctl/cmd/platform/get/cluster.go create mode 100644 cmd/vclusterctl/cmd/platform/get/get.go create mode 100644 cmd/vclusterctl/cmd/platform/list/clusters.go create mode 100644 cmd/vclusterctl/cmd/platform/list/list.go create mode 100644 pkg/cli/reset/password.go create mode 100644 pkg/cli/reset/reset.go create mode 100644 pkg/cli/start/docker.go create mode 100644 pkg/cli/start/login.go create mode 100644 pkg/cli/start/port_forwarding.go create mode 100644 pkg/cli/start/start.go create mode 100644 pkg/cli/start/success.go create mode 100644 pkg/cli/start/upgrade.go create mode 100644 pkg/platform/clihelper/clihelper.go create mode 100644 pkg/platform/defaults/defaults.go create mode 100644 pkg/platform/kube/client.go create mode 100644 pkg/platform/kubeconfig/kubeconfig.go create mode 100644 pkg/platform/loftclient/client.go create mode 100644 pkg/platform/loftclient/config.go create mode 100644 pkg/platform/loftclient/helper/helper.go create mode 100644 pkg/platform/loftclient/naming/naming.go create mode 100644 pkg/platform/loftconfig/variables.go create mode 100644 pkg/platform/loftutils/positional_args.go create mode 100644 pkg/platform/loftutils/positional_args_test.go create mode 100644 pkg/platform/loftutils/util.go diff --git a/cmd/vclusterctl/cmd/platform/access_key.go b/cmd/vclusterctl/cmd/platform/access_key.go index 406cd1cdb..85d0488d2 100644 --- a/cmd/vclusterctl/cmd/platform/access_key.go +++ b/cmd/vclusterctl/cmd/platform/access_key.go @@ -7,9 +7,9 @@ import ( "os" "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/cmd/loftctl/flags" - "github.com/loft-sh/loftctl/v4/pkg/client" 
"github.com/loft-sh/log" + "github.com/loft-sh/vcluster/pkg/cli/flags" + client "github.com/loft-sh/vcluster/pkg/platform/loftclient" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/cmd/vclusterctl/cmd/platform/add/add.go b/cmd/vclusterctl/cmd/platform/add/add.go new file mode 100644 index 000000000..dad6b9425 --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/add/add.go @@ -0,0 +1,22 @@ +package add + +import ( + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/spf13/cobra" +) + +// NewAddCmd creates a new command +func NewAddCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + addCmd := &cobra.Command{ + Use: "add", + Short: "Adds a cluster to vCluster platform", + Long: `####################################################### +########### vcluster platform add ################# +####################################################### + `, + Args: cobra.NoArgs, + } + + addCmd.AddCommand(NewClusterCmd(globalFlags)) + return addCmd +} diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go new file mode 100644 index 000000000..8814f302f --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -0,0 +1,306 @@ +package add + +import ( + "cmp" + "context" + "errors" + "fmt" + "os" + "os/exec" + "time" + + "github.com/loft-sh/log" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" + "github.com/loft-sh/vcluster/pkg/platform/kube" + client "github.com/loft-sh/vcluster/pkg/platform/loftclient" + "github.com/loft-sh/vcluster/pkg/platform/loftclient/helper" + "github.com/sirupsen/logrus" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/upgrade" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +type ClusterCmd struct { + Log log.Logger + *flags.GlobalFlags + Namespace string + ServiceAccount string + DisplayName string + Context string + Insecure bool + Wait bool + HelmChartPath string + HelmChartVersion string + HelmSet []string + HelmValues []string +} + +// NewClusterCmd creates a new command +func NewClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + cmd := &ClusterCmd{ + GlobalFlags: globalFlags, + Log: log.GetInstance(), + } + + c := &cobra.Command{ + Use: "cluster", + Short: "add current cluster to vCluster platform", + Long: `####################################################### +########## vcluster platform add cluster ########## +####################################################### +Adds a cluster to the vCluster platform instance. + +Example: +vcluster platform add cluster my-cluster +######################################################## + `, + Args: cobra.ExactArgs(1), + RunE: func(cobraCmd *cobra.Command, args []string) error { + // Check for newer version + upgrade.PrintNewerVersionWarning() + + return cmd.Run(cobraCmd.Context(), args) + }, + } + + c.Flags().StringVar(&cmd.Namespace, "namespace", "loft", "The namespace to generate the service account in. 
The namespace will be created if it does not exist") + c.Flags().StringVar(&cmd.ServiceAccount, "service-account", "loft-admin", "The service account name to create") + c.Flags().StringVar(&cmd.DisplayName, "display-name", "", "The display name to show in the UI for this cluster") + c.Flags().BoolVar(&cmd.Wait, "wait", false, "If true, will wait until the cluster is initialized") + c.Flags().BoolVar(&cmd.Insecure, "insecure", false, "If true, deploys the agent in insecure mode") + c.Flags().StringVar(&cmd.HelmChartVersion, "helm-chart-version", "", "The agent chart version to deploy") + c.Flags().StringVar(&cmd.HelmChartPath, "helm-chart-path", "", "The agent chart to deploy") + c.Flags().StringArrayVar(&cmd.HelmSet, "helm-set", []string{}, "Extra helm values for the agent chart") + c.Flags().StringArrayVar(&cmd.HelmValues, "helm-values", []string{}, "Extra helm values for the agent chart") + c.Flags().StringVar(&cmd.Context, "context", "", "The kube context to use for installation") + + return c +} + +func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { + // Get clusterName from command argument + clusterName := args[0] + + baseClient, err := client.NewClientFromPath(cmd.Config) + if err != nil { + return fmt.Errorf("new client from path: %w", err) + } + + err = client.VerifyVersion(baseClient) + if err != nil { + return fmt.Errorf("verify loft version: %w", err) + } + + managementClient, err := baseClient.Management() + if err != nil { + return fmt.Errorf("create management client: %w", err) + } + + // get user details + user, team, err := getUserOrTeam(ctx, managementClient) + if err != nil { + return fmt.Errorf("get user or team: %w", err) + } + + loftVersion, err := baseClient.Version() + if err != nil { + return fmt.Errorf("get loft version: %w", err) + } + + // TODO(ThomasK33): Eventually change this into an Apply instead of a Create call + _, err = managementClient.Loft().ManagementV1().Clusters().Create(ctx, &managementv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + Spec: managementv1.ClusterSpec{ + ClusterSpec: storagev1.ClusterSpec{ + DisplayName: cmd.DisplayName, + Owner: &storagev1.UserOrTeam{ + User: user, + Team: team, + }, + NetworkPeer: true, + Access: getAccess(user, team), + }, + }, + }, metav1.CreateOptions{}) + if err != nil && !kerrors.IsAlreadyExists(err) { + return fmt.Errorf("create cluster: %w", err) + } + + accessKey, err := managementClient.Loft().ManagementV1().Clusters().GetAccessKey(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("get cluster access key: %w", err) + } + + namespace := cmd.Namespace + + helmArgs := []string{ + "upgrade", "loft", + } + + if os.Getenv("DEVELOPMENT") == "true" { + helmArgs = []string{ + "upgrade", "--install", "loft", "./chart", + "--create-namespace", + "--namespace", namespace, + "--set", "agentOnly=true", + "--set", "image=" + cmp.Or(os.Getenv("DEVELOPMENT_IMAGE"), "ghcr.io/loft-sh/enterprise:release-test"), + } + } else { + if cmd.HelmChartPath != "" { + helmArgs = append(helmArgs, cmd.HelmChartPath) + } else { + helmArgs = append(helmArgs, "loft", "--repo", "https://charts.loft.sh") + } + + if loftVersion.Version != "" { + helmArgs = append(helmArgs, "--version", loftVersion.Version) + } + + if cmd.HelmChartVersion != "" { + helmArgs = append(helmArgs, "--version", cmd.HelmChartVersion) + } + + // general arguments + helmArgs = append(helmArgs, "--install", "--create-namespace", "--namespace", cmd.Namespace, "--set", "agentOnly=true") + } + + for _, set 
:= range cmd.HelmSet { + helmArgs = append(helmArgs, "--set", set) + } + for _, values := range cmd.HelmValues { + helmArgs = append(helmArgs, "--values", values) + } + + if accessKey.LoftHost != "" { + helmArgs = append(helmArgs, "--set", "url="+accessKey.LoftHost) + } + + if accessKey.AccessKey != "" { + helmArgs = append(helmArgs, "--set", "token="+accessKey.AccessKey) + } + + if cmd.Insecure || accessKey.Insecure || baseClient.Config().Insecure { + helmArgs = append(helmArgs, "--set", "insecureSkipVerify=true") + } + + if accessKey.CaCert != "" { + helmArgs = append(helmArgs, "--set", "additionalCA="+accessKey.CaCert) + } + + if cmd.Wait { + helmArgs = append(helmArgs, "--wait") + } + + if cmd.Context != "" { + helmArgs = append(helmArgs, "--kube-context", cmd.Context) + } + + kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}) + + if cmd.Context != "" { + kubeConfig, err := kubeClientConfig.RawConfig() + if err != nil { + return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + } + + kubeClientConfig = clientcmd.NewNonInteractiveClientConfig(kubeConfig, cmd.Context, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules()) + } + + config, err := kubeClientConfig.ClientConfig() + if err != nil { + return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("create kube client: %w", err) + } + + errChan := make(chan error) + + go func() { + helmCmd := exec.CommandContext(ctx, "helm", helmArgs...) 
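+		// send helm's own output through the debug logger so it is only
+		// shown when the CLI runs with --debug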
+ + helmCmd.Stdout = cmd.Log.Writer(logrus.DebugLevel, true) + helmCmd.Stderr = cmd.Log.Writer(logrus.DebugLevel, true) + helmCmd.Stdin = os.Stdin + + cmd.Log.Info("Installing Loft agent...") + cmd.Log.Debugf("Running helm command: %v", helmCmd.Args) + + err = helmCmd.Run() + if err != nil { + errChan <- fmt.Errorf("failed to install loft chart: %w", err) + } + + close(errChan) + }() + + _, err = clihelper.WaitForReadyLoftPod(ctx, clientset, namespace, cmd.Log) + if err = errors.Join(err, <-errChan); err != nil { + return fmt.Errorf("wait for loft pod: %w", err) + } + + if cmd.Wait { + cmd.Log.Info("Waiting for the cluster to be initialized...") + waitErr := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { + clusterInstance, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } + + return clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil + }) + if waitErr != nil { + return fmt.Errorf("get cluster: %w", waitErr) + } + } + + cmd.Log.Donef("Successfully added cluster %s to Loft", clusterName) + + return nil +} + +func getUserOrTeam(ctx context.Context, managementClient kube.Interface) (string, string, error) { + var user, team string + + userName, teamName, err := helper.GetCurrentUser(ctx, managementClient) + if err != nil { + return "", "", fmt.Errorf("get current user: %w", err) + } + + if userName != nil { + user = userName.Name + } else { + team = teamName.Name + } + + return user, team, nil +} + +func getAccess(user, team string) []storagev1.Access { + access := []storagev1.Access{ + { + Verbs: []string{"*"}, + Subresources: []string{"*"}, + }, + } + + if team != "" { + access[0].Teams = []string{team} + } else { + access[0].Users = []string{user} + } + + return access +} diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 8245597e8..ea56c8a8e 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -1,280 +1,211 @@ package connect import ( - "cmp" "context" - "errors" + "encoding/base64" "fmt" "os" - "os/exec" - "time" - - "github.com/loft-sh/loftctl/v4/pkg/client" - "github.com/loft-sh/loftctl/v4/pkg/client/helper" - "github.com/loft-sh/loftctl/v4/pkg/clihelper" - "github.com/loft-sh/loftctl/v4/pkg/kube" - "github.com/loft-sh/log" - "github.com/sirupsen/logrus" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" + "strings" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/loftctl/v4/cmd/loftctl/flags" - "github.com/loft-sh/loftctl/v4/pkg/upgrade" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/log" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" + client "github.com/loft-sh/vcluster/pkg/platform/loftclient" + "github.com/loft-sh/vcluster/pkg/platform/loftclient/helper" + "github.com/loft-sh/vcluster/pkg/upgrade" + "github.com/mgutz/ansi" "github.com/spf13/cobra" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) +const ( + // LoftDirectClusterEndpoint is a cluster annotation that tells the loft cli to use this endpoint instead of + // the 
default loft server address to connect to this cluster. + LoftDirectClusterEndpoint = "loft.sh/direct-cluster-endpoint" + + // LoftDirectClusterEndpointInsecure is a cluster annotation that tells the loft cli to allow untrusted certificates + LoftDirectClusterEndpointInsecure = "loft.sh/direct-cluster-endpoint-insecure" + + // LoftDirectClusterEndpointCaData is a cluster annotation that tells the loft cli which cluster ca data to use + LoftDirectClusterEndpointCaData = "loft.sh/direct-cluster-endpoint-ca-data" +) + +// ClusterCmd holds the cmd flags type ClusterCmd struct { - Log log.Logger *flags.GlobalFlags - Namespace string - ServiceAccount string - DisplayName string - Context string - Insecure bool - Wait bool + + Print bool + DisableDirectClusterEndpoint bool + + log log.Logger } // NewClusterCmd creates a new command func NewClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { cmd := &ClusterCmd{ GlobalFlags: globalFlags, - Log: log.GetInstance(), + log: log.GetInstance(), } - c := &cobra.Command{ - Use: "cluster", - Short: "connect current cluster to vCluster platform", - Long: `####################################################### -########## vcluster platform connect cluster ########## -####################################################### -Connect a cluster to the vCluster platform instance. + description := product.ReplaceWithHeader("use cluster", ` +Creates a new kube context for the given cluster, if +it does not yet exist. Example: -vcluster platform connect cluster my-cluster +vcluster platform connect cluster mycluster ######################################################## - `, - Args: cobra.ExactArgs(1), + `) + c := &cobra.Command{ + Use: "cluster", + Short: "Creates a kube context for the given cluster", + Long: description, + Args: cobra.MaximumNArgs(1), RunE: func(cobraCmd *cobra.Command, args []string) error { // Check for newer version - upgrade.PrintNewerVersionWarning() + if !cmd.Print { + upgrade.PrintNewerVersionWarning() + } return cmd.Run(cobraCmd.Context(), args) }, } - c.Flags().StringVar(&cmd.Namespace, "namespace", "loft", "The namespace to generate the service account in. 
The namespace will be created if it does not exist") - c.Flags().StringVar(&cmd.ServiceAccount, "service-account", "loft-admin", "The service account name to create") - c.Flags().StringVar(&cmd.DisplayName, "display-name", "", "The display name to show in the UI for this cluster") - c.Flags().BoolVar(&cmd.Wait, "wait", false, "If true, will wait until the cluster is initialized") - c.Flags().BoolVar(&cmd.Insecure, "insecure", false, "If true, deploys the agent in insecure mode") - c.Flags().StringVar(&cmd.Context, "context", "", "The kube context to use for installation") - + c.Flags().BoolVar(&cmd.Print, "print", false, "When enabled prints the context to stdout") + c.Flags().BoolVar(&cmd.DisableDirectClusterEndpoint, "disable-direct-cluster-endpoint", false, "When enabled does not use an available direct cluster endpoint to connect to the cluster") return c } +// Run executes the command func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { - // Get clusterName from command argument - clusterName := args[0] - baseClient, err := client.NewClientFromPath(cmd.Config) if err != nil { - return fmt.Errorf("new client from path: %w", err) - } - - err = client.VerifyVersion(baseClient) - if err != nil { - return fmt.Errorf("verify loft version: %w", err) + return err } managementClient, err := baseClient.Management() if err != nil { - return fmt.Errorf("create management client: %w", err) + return err } - // get user details - user, team, err := getUserOrTeam(ctx, managementClient) - if err != nil { - return fmt.Errorf("get user or team: %w", err) + // determine cluster name + clusterName := "" + if len(args) == 0 { + clusterName, err = helper.SelectCluster(ctx, baseClient, cmd.log) + if err != nil { + return err + } + } else { + clusterName = args[0] } - loftVersion, err := baseClient.Version() + // check if the cluster exists + cluster, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("get loft version: %w", err) - } + if kerrors.IsForbidden(err) { + return fmt.Errorf("cluster '%s' does not exist, or you don't have permission to use it", clusterName) + } - // TODO(ThomasK33): Eventually change this into an Apply instead of a Create call - _, err = managementClient.Loft().ManagementV1().Clusters().Create(ctx, &managementv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - }, - Spec: managementv1.ClusterSpec{ - ClusterSpec: storagev1.ClusterSpec{ - DisplayName: cmd.DisplayName, - Owner: &storagev1.UserOrTeam{ - User: user, - Team: team, - }, - NetworkPeer: true, - Access: getAccess(user, team), - }, - }, - }, metav1.CreateOptions{}) - if err != nil && !kerrors.IsAlreadyExists(err) { - return fmt.Errorf("create cluster: %w", err) + return err } - accessKey, err := managementClient.Loft().ManagementV1().Clusters().GetAccessKey(ctx, clusterName, metav1.GetOptions{}) + // create kube context options + contextOptions, err := CreateClusterContextOptions(baseClient, cmd.Config, cluster, "", cmd.DisableDirectClusterEndpoint, true, cmd.log) if err != nil { - return fmt.Errorf("get cluster access key: %w", err) + return err } - namespace := cmd.Namespace - - helmArgs := []string{ - "upgrade", "--install", "loft", "loft", - "--repo", "https://charts.loft.sh", - "--create-namespace", - "--namespace", namespace, - "--set", "agentOnly=true", - } - - if os.Getenv("DEVELOPMENT") == "true" { - helmArgs = []string{ - "upgrade", "--install", "loft", "./chart", - "--create-namespace", - 
"--namespace", namespace, - "--set", "agentOnly=true", - "--set", "image=" + cmp.Or(os.Getenv("DEVELOPMENT_IMAGE"), "ghcr.io/loft-sh/enterprise:release-test"), + // check if we should print or update the config + if cmd.Print { + err = kubeconfig.PrintKubeConfigTo(contextOptions, os.Stdout) + if err != nil { + return err } - } else if loftVersion.Version != "" { - helmArgs = append(helmArgs, "--version", loftVersion.Version) - } - - if accessKey.LoftHost != "" { - helmArgs = append(helmArgs, "--set", "url="+accessKey.LoftHost) - } - - if accessKey.AccessKey != "" { - helmArgs = append(helmArgs, "--set", "token="+accessKey.AccessKey) - } - - if cmd.Insecure || accessKey.Insecure || baseClient.Config().Insecure { - helmArgs = append(helmArgs, "--set", "insecureSkipVerify=true") - } - - if accessKey.CaCert != "" { - helmArgs = append(helmArgs, "--set", "additionalCA="+accessKey.CaCert) - } - - if cmd.Wait { - helmArgs = append(helmArgs, "--wait") - } - - if cmd.Context != "" { - helmArgs = append(helmArgs, "--kube-context", cmd.Context) - } - - kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}) - - if cmd.Context != "" { - kubeConfig, err := kubeClientConfig.RawConfig() + } else { + // update kube config + err = kubeconfig.UpdateKubeConfig(contextOptions) if err != nil { - return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + return err } - kubeClientConfig = clientcmd.NewNonInteractiveClientConfig(kubeConfig, cmd.Context, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules()) + cmd.log.Donef("Successfully updated kube context to use cluster %s", ansi.Color(clusterName, "white+b")) } - config, err := kubeClientConfig.ClientConfig() - if err != nil { - return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("create kube client: %w", err) - } - - errChan := make(chan error) - - go func() { - helmCmd := exec.CommandContext(ctx, "helm", helmArgs...) 
- - helmCmd.Stdout = cmd.Log.Writer(logrus.DebugLevel, true) - helmCmd.Stderr = cmd.Log.Writer(logrus.DebugLevel, true) - helmCmd.Stdin = os.Stdin - - cmd.Log.Info("Installing Loft agent...") - cmd.Log.Debugf("Running helm command: %v", helmCmd.Args) + return nil +} - err = helmCmd.Run() +//func findProjectCluster(ctx context.Context, baseClient client.Client, projectName, clusterName string) (*managementv1.Cluster, error) { +// managementClient, err := baseClient.Management() +// if err != nil { +// return nil, err +// } +// +// projectClusters, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, projectName, metav1.GetOptions{}) +// if err != nil { +// return nil, errors.Wrap(err, "list project clusters") +// } +// +// for _, cluster := range projectClusters.Clusters { +// if cluster.Name == clusterName { +// return &cluster, nil +// } +// } +// +// return nil, fmt.Errorf("couldn't find cluster %s in project %s", clusterName, projectName) +//} + +func CreateClusterContextOptions(baseClient client.Client, config string, cluster *managementv1.Cluster, spaceName string, disableClusterGateway, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { + contextOptions := kubeconfig.ContextOptions{ + Name: kubeconfig.SpaceContextName(cluster.Name, spaceName), + ConfigPath: config, + CurrentNamespace: spaceName, + SetActive: setActive, + } + if !disableClusterGateway && cluster.Annotations != nil && cluster.Annotations[LoftDirectClusterEndpoint] != "" { + contextOptions = ApplyDirectClusterEndpointOptions(contextOptions, cluster, "/kubernetes/cluster", log) + _, err := baseClient.DirectClusterEndpointToken(true) if err != nil { - errChan <- fmt.Errorf("failed to install loft chart: %w", err) + return kubeconfig.ContextOptions{}, fmt.Errorf("retrieving direct cluster endpoint token: %w. 
Use --disable-direct-cluster-endpoint to create a context without using direct cluster endpoints", err) } - - close(errChan) - }() - - _, err = clihelper.WaitForReadyLoftPod(ctx, clientset, namespace, cmd.Log) - if err = errors.Join(err, <-errChan); err != nil { - return fmt.Errorf("wait for loft pod: %w", err) + } else { + contextOptions.Server = baseClient.Config().Host + "/kubernetes/cluster/" + cluster.Name + contextOptions.InsecureSkipTLSVerify = baseClient.Config().Insecure } - if cmd.Wait { - cmd.Log.Info("Waiting for the cluster to be initialized...") - waitErr := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { - clusterInstance, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return false, err - } - - return clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil - }) - if waitErr != nil { - return fmt.Errorf("get cluster: %w", waitErr) - } + data, err := retrieveCaData(cluster) + if err != nil { + return kubeconfig.ContextOptions{}, err } - - cmd.Log.Donef("Successfully connected cluster %s to Loft", clusterName) - - return nil + contextOptions.CaData = data + return contextOptions, nil } -func getUserOrTeam(ctx context.Context, managementClient kube.Interface) (string, string, error) { - var user, team string - - userName, teamName, err := helper.GetCurrentUser(ctx, managementClient) - if err != nil { - return "", "", fmt.Errorf("get current user: %w", err) +func ApplyDirectClusterEndpointOptions(options kubeconfig.ContextOptions, cluster *managementv1.Cluster, path string, log log.Logger) kubeconfig.ContextOptions { + server := strings.TrimSuffix(cluster.Annotations[LoftDirectClusterEndpoint], "/") + if !strings.HasPrefix(server, "https://") { + server = "https://" + server } - if userName != nil { - user = userName.Name - } else { - team = teamName.Name + log.Infof("Using direct cluster endpoint at %s", server) + options.Server = server + path + if cluster.Annotations[LoftDirectClusterEndpointInsecure] == "true" { + options.InsecureSkipTLSVerify = true } - - return user, team, nil + options.DirectClusterEndpointEnabled = true + return options } -func getAccess(user, team string) []storagev1.Access { - access := []storagev1.Access{ - { - Verbs: []string{"*"}, - Subresources: []string{"*"}, - }, +func retrieveCaData(cluster *managementv1.Cluster) ([]byte, error) { + if cluster == nil || cluster.Annotations == nil || cluster.Annotations[LoftDirectClusterEndpointCaData] == "" { + return nil, nil } - if team != "" { - access[0].Teams = []string{team} - } else { - access[0].Users = []string{user} + data, err := base64.StdEncoding.DecodeString(cluster.Annotations[LoftDirectClusterEndpointCaData]) + if err != nil { + return nil, fmt.Errorf("error decoding cluster %s annotation: %w", LoftDirectClusterEndpointCaData, err) } - return access + return data, nil } diff --git a/cmd/vclusterctl/cmd/platform/connect/connect.go b/cmd/vclusterctl/cmd/platform/connect/connect.go index 2ff5bf1e1..1b9eafd45 100644 --- a/cmd/vclusterctl/cmd/platform/connect/connect.go +++ b/cmd/vclusterctl/cmd/platform/connect/connect.go @@ -1,22 +1,25 @@ package connect import ( - "github.com/loft-sh/loftctl/v4/cmd/loftctl/flags" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/vcluster/pkg/cli/flags" + platformdefaults "github.com/loft-sh/vcluster/pkg/platform/defaults" "github.com/spf13/cobra" ) -// 
NewConnectCmd creates a new command
-func NewConnectCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
-	connectCmd := &cobra.Command{
+// NewConnectCmd creates a new cobra command
+func NewConnectCmd(globalFlags *flags.GlobalFlags, _ *platformdefaults.Defaults) *cobra.Command {
+	description := product.ReplaceWithHeader("connect", `
+
+Activates a kube context for the given cluster / space / vcluster / management.
+	`)
+	useCmd := &cobra.Command{
 		Use:   "connect",
-		Short: "Connects a cluster to vCluster platform",
-		Long: `#######################################################
-########### vcluster platform connect #################
-#######################################################
-	`,
-		Args: cobra.NoArgs,
+		Short: product.Replace("Connects to loft resources"),
+		Long:  description,
+		Args:  cobra.NoArgs,
 	}
 
-	connectCmd.AddCommand(NewClusterCmd(globalFlags))
-	return connectCmd
+	useCmd.AddCommand(NewClusterCmd(globalFlags))
+	return useCmd
 }
diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go
new file mode 100644
index 000000000..6b6869029
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/get/cluster.go
@@ -0,0 +1,182 @@
+package get
+
+import (
+	"context"
+	"errors"
+	"os"
+	"strings"
+	"time"
+
+	managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	client "github.com/loft-sh/vcluster/pkg/platform/loftclient"
+	"github.com/loft-sh/vcluster/pkg/platform/loftclient/naming"
+	config "github.com/loft-sh/vcluster/pkg/platform/loftconfig"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	ErrNotLoftContext = errors.New("current context is not a loft context, but predefined var LOFT_CLUSTER is used")
+)
+
+type clusterCmd struct {
+	*flags.GlobalFlags
+}
+
+func newClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
+	cmd := &clusterCmd{
+		GlobalFlags: globalFlags,
+	}
+
+	return &cobra.Command{
+		Use:   "cluster",
+		Short: "Prints the current cluster",
+		Args:  cobra.NoArgs,
+		RunE: func(cobraCmd *cobra.Command, args []string) error {
+			return cmd.Run(cobraCmd.Context(), args)
+		},
+	}
+}
+
+// Run executes the command logic
+func (c *clusterCmd) Run(ctx context.Context, _ []string) error {
+	kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig()
+	if err != nil {
+		return err
+	}
+
+	kubeContext := os.Getenv("DEVSPACE_PLUGIN_KUBE_CONTEXT_FLAG")
+	if kubeContext == "" {
+		kubeContext = kubeConfig.CurrentContext
+	}
+
+	cluster, ok := kubeConfig.Clusters[kubeContext]
+	if !ok {
+		return ErrNotLoftContext
+	}
+
+	isProject, projectName := isProjectContext(cluster)
+	if isProject {
+		baseClient, err := client.NewClientFromPath(c.Config)
+		if err != nil {
+			return err
+		}
+
+		managementClient, err := baseClient.Management()
+		if err != nil {
+			return err
+		}
+
+		if isSpace, spaceName := isSpaceContext(cluster); isSpace {
+			var spaceInstance *managementv1.SpaceInstance
+			err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) {
+				var err error
+
+				spaceInstance, err = managementClient.Loft().ManagementV1().SpaceInstances(naming.ProjectNamespace(projectName)).Get(ctx, spaceName, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
+
+				// Wait for space instance to be scheduled
+				if spaceInstance.Spec.ClusterRef.Cluster == "" {
+					return false, nil
+				}
+
+				return true, nil
+			})
+			if err != nil {
+				return err
+			}
+
+			_, err = os.Stdout.Write([]byte(spaceInstance.Spec.ClusterRef.Cluster))
+			return err
+		}
+
+		if isVirtualCluster, virtualClusterName := isVirtualClusterContext(cluster); isVirtualCluster {
+			var virtualClusterInstance *managementv1.VirtualClusterInstance
+			err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) {
+				var err error
+
+				virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(projectName)).Get(ctx, virtualClusterName, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
+
+				// Wait for virtual cluster instance to be scheduled
+				if virtualClusterInstance.Spec.ClusterRef.Cluster == "" {
+					return false, nil
+				}
+
+				return true, nil
+			})
+			if err != nil {
+				return err
+			}
+
+			_, err = os.Stdout.Write([]byte(virtualClusterInstance.Spec.ClusterRef.Cluster))
+			return err
+		}
+
+		return ErrNotLoftContext
+	}
+
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+	if len(splitted) < 3 {
+		return ErrNotLoftContext
+	} else if splitted[len(splitted)-2] != "cluster" || splitted[len(splitted)-3] != "kubernetes" {
+		return ErrNotLoftContext
+	}
+
+	_, err = os.Stdout.Write([]byte(splitted[len(splitted)-1]))
+	return err
+}
+
+func isProjectContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[4] == "project" {
+		return true, splitted[5]
+	}
+
+	return false, ""
+}
+
+func isSpaceContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[6] == "space" {
+		return true, splitted[7]
+	}
+
+	return false, ""
+}
+
+func isVirtualClusterContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[6] == "virtualcluster" {
+		return true, splitted[7]
+	}
+
+	return false, ""
+}
diff --git a/cmd/vclusterctl/cmd/platform/get/get.go b/cmd/vclusterctl/cmd/platform/get/get.go
new file mode 100644
index 000000000..2ec7b1580
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/get/get.go
@@ -0,0 +1,22 @@
+package get
+
+import (
+	"github.com/loft-sh/api/v4/pkg/product"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/spf13/cobra"
+)
+
+// NewVarsCmd creates a new cobra command for the 'get' sub command
+func NewVarsCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
+	description := product.ReplaceWithHeader("var", "")
+
+	cmd := &cobra.Command{
+		Use:   "get",
+		Short: "Retrieves and displays information",
+		Long:  description,
+		Args:  cobra.NoArgs,
+	}
+
+	cmd.AddCommand(newClusterCmd(globalFlags))
+	return cmd
+}
diff --git a/cmd/vclusterctl/cmd/platform/import.go b/cmd/vclusterctl/cmd/platform/import.go
index 1d2a6d4b8..7ae13feec 100644
--- a/cmd/vclusterctl/cmd/platform/import.go
+++ b/cmd/vclusterctl/cmd/platform/import.go
@@ -3,11 +3,11 @@ package platform
 import (
 	"context"
 
-	loftctlUtil "github.com/loft-sh/loftctl/v4/pkg/util"
 	"github.com/loft-sh/log"
 	"github.com/loft-sh/vcluster/pkg/cli"
 	"github.com/loft-sh/vcluster/pkg/cli/flags"
"github.com/loft-sh/vcluster/pkg/platform" + loftctlUtil "github.com/loft-sh/vcluster/pkg/platform/loftutils" "github.com/spf13/cobra" ) diff --git a/cmd/vclusterctl/cmd/platform/list/clusters.go b/cmd/vclusterctl/cmd/platform/list/clusters.go new file mode 100644 index 000000000..bac9cce2d --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/list/clusters.go @@ -0,0 +1,81 @@ +package list + +import ( + "context" + "time" + + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/log" + "github.com/loft-sh/log/table" + "github.com/loft-sh/vcluster/pkg/cli/flags" + client "github.com/loft-sh/vcluster/pkg/platform/loftclient" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/duration" +) + +// ClustersCmd holds the login cmd flags +type ClustersCmd struct { + *flags.GlobalFlags + + log log.Logger +} + +// NewClustersCmd creates a new spaces command +func NewClustersCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + cmd := &ClustersCmd{ + GlobalFlags: globalFlags, + log: log.GetInstance(), + } + description := product.ReplaceWithHeader("list clusters", ` +List the vcluster platform clusters you have access to + +Example: +vcluster platform list clusters +######################################################## + `) + clustersCmd := &cobra.Command{ + Use: "clusters", + Short: product.Replace("Lists the loft clusters you have access to"), + Long: description, + Args: cobra.NoArgs, + RunE: func(cobraCmd *cobra.Command, _ []string) error { + return cmd.RunClusters(cobraCmd.Context()) + }, + } + + return clustersCmd +} + +// RunClusters executes the functionality +func (cmd *ClustersCmd) RunClusters(ctx context.Context) error { + baseClient, err := client.NewClientFromPath(cmd.Config) + if err != nil { + return err + } + + managementClient, err := baseClient.Management() + if err != nil { + return err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return err + } + + header := []string{ + "Cluster", + "Age", + } + values := [][]string{} + for _, cluster := range clusterList.Items { + values = append(values, []string{ + cluster.Name, + duration.HumanDuration(time.Since(cluster.CreationTimestamp.Time)), + }) + } + + table.PrintTable(cmd.log, header, values) + return nil +} diff --git a/cmd/vclusterctl/cmd/platform/list/list.go b/cmd/vclusterctl/cmd/platform/list/list.go new file mode 100644 index 000000000..efe08962b --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/list/list.go @@ -0,0 +1,23 @@ +package list + +import ( + "github.com/loft-sh/api/v4/pkg/product" + + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/spf13/cobra" +) + +// NewListCmd creates a new cobra command +func NewListCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + description := product.ReplaceWithHeader("list", "") + listCmd := &cobra.Command{ + Use: "list", + Short: "Lists configuration", + Long: description, + Args: cobra.NoArgs, + } + + // TODO: change that with the actual globalFlag variable + listCmd.AddCommand(NewClustersCmd(globalFlags)) + return listCmd +} diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go index fad705bdd..a3073f4d3 100644 --- a/cmd/vclusterctl/cmd/platform/platform.go +++ b/cmd/vclusterctl/cmd/platform/platform.go @@ -3,9 +3,8 @@ package platform import ( "fmt" - "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/add" 
"github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/loft-sh/vcluster/pkg/platform" "github.com/spf13/cobra" ) @@ -20,20 +19,15 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { Args: cobra.NoArgs, } - loftctlGlobalFlags, err := platform.GlobalFlags(globalFlags) - if err != nil { - return nil, fmt.Errorf("failed to parse pro flags: %w", err) - } - - startCmd, err := NewStartCmd(loftctlGlobalFlags) + startCmd, err := NewStartCmd(globalFlags) if err != nil { return nil, fmt.Errorf("failed to create vcluster platform start command: %w", err) } platformCmd.AddCommand(startCmd) - platformCmd.AddCommand(NewResetCmd(loftctlGlobalFlags)) - platformCmd.AddCommand(connect.NewConnectCmd(loftctlGlobalFlags)) - platformCmd.AddCommand(NewAccessKeyCmd(loftctlGlobalFlags)) + platformCmd.AddCommand(NewResetCmd(globalFlags)) + platformCmd.AddCommand(add.NewAddCmd(globalFlags)) + platformCmd.AddCommand(NewAccessKeyCmd(globalFlags)) platformCmd.AddCommand(NewImportCmd(globalFlags)) return platformCmd, nil diff --git a/cmd/vclusterctl/cmd/platform/pro.go b/cmd/vclusterctl/cmd/platform/pro.go index c366f6535..408b40011 100644 --- a/cmd/vclusterctl/cmd/platform/pro.go +++ b/cmd/vclusterctl/cmd/platform/pro.go @@ -3,9 +3,10 @@ package platform import ( "fmt" + "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/loft-sh/vcluster/pkg/platform" + platformdefaults "github.com/loft-sh/vcluster/pkg/platform/defaults" "github.com/spf13/cobra" ) @@ -22,20 +23,19 @@ Deprecated, please use vcluster platform instead Args: cobra.NoArgs, } - loftctlGlobalFlags, err := platform.GlobalFlags(globalFlags) + startCmd, err := NewStartCmd(globalFlags) if err != nil { - return nil, fmt.Errorf("failed to parse pro flags: %w", err) + return nil, fmt.Errorf("failed to create vcluster pro start command: %w", err) } - - startCmd, err := NewStartCmd(loftctlGlobalFlags) + d, err := platformdefaults.NewFromPath(platformdefaults.ConfigFolder, platformdefaults.ConfigFile) if err != nil { - return nil, fmt.Errorf("failed to create vcluster pro start command: %w", err) + log.Default.Debugf(err.Error()) } proCmd.AddCommand(startCmd) - proCmd.AddCommand(NewResetCmd(loftctlGlobalFlags)) - proCmd.AddCommand(connect.NewConnectCmd(loftctlGlobalFlags)) - proCmd.AddCommand(NewAccessKeyCmd(loftctlGlobalFlags)) + proCmd.AddCommand(NewResetCmd(globalFlags)) + proCmd.AddCommand(connect.NewConnectCmd(globalFlags, d)) + proCmd.AddCommand(NewAccessKeyCmd(globalFlags)) return proCmd, nil } diff --git a/cmd/vclusterctl/cmd/platform/reset.go b/cmd/vclusterctl/cmd/platform/reset.go index 6a5db1fdb..83357bd9c 100644 --- a/cmd/vclusterctl/cmd/platform/reset.go +++ b/cmd/vclusterctl/cmd/platform/reset.go @@ -1,13 +1,13 @@ package platform import ( - "github.com/loft-sh/loftctl/v4/cmd/loftctl/cmd/reset" - loftctlflags "github.com/loft-sh/loftctl/v4/cmd/loftctl/flags" "github.com/loft-sh/log" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/cli/reset" "github.com/spf13/cobra" ) -func NewResetCmd(loftctlGlobalFlags *loftctlflags.GlobalFlags) *cobra.Command { +func NewResetCmd(loftctlGlobalFlags *flags.GlobalFlags) *cobra.Command { description := `######################################################## ############# vcluster platform reset ################## ######################################################## @@ -24,7 +24,7 @@ func NewResetCmd(loftctlGlobalFlags *loftctlflags.GlobalFlags) 
 	return cmd
 }
 
-func NewPasswordCmd(globalFlags *loftctlflags.GlobalFlags) *cobra.Command {
+func NewPasswordCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
 	cmd := &reset.PasswordCmd{
 		GlobalFlags: globalFlags,
 		Log:         log.GetInstance(),
diff --git a/cmd/vclusterctl/cmd/platform/start.go b/cmd/vclusterctl/cmd/platform/start.go
index 5654ce410..0279a5e9e 100644
--- a/cmd/vclusterctl/cmd/platform/start.go
+++ b/cmd/vclusterctl/cmd/platform/start.go
@@ -4,12 +4,12 @@ import (
 	"context"
 	"fmt"
 
-	loftctlflags "github.com/loft-sh/loftctl/v4/cmd/loftctl/flags"
-	"github.com/loft-sh/loftctl/v4/pkg/start"
 	"github.com/loft-sh/log"
 	"github.com/loft-sh/log/survey"
 	"github.com/loft-sh/log/terminal"
 	"github.com/loft-sh/vcluster/pkg/cli/find"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/loft-sh/vcluster/pkg/cli/start"
 	"github.com/loft-sh/vcluster/pkg/platform"
 	"github.com/spf13/cobra"
 	"k8s.io/client-go/tools/clientcmd"
@@ -19,7 +19,7 @@ type StartCmd struct {
 	start.Options
 }
 
-func NewStartCmd(loftctlGlobalFlags *loftctlflags.GlobalFlags) (*cobra.Command, error) {
+func NewStartCmd(loftctlGlobalFlags *flags.GlobalFlags) (*cobra.Command, error) {
 	cmd := &StartCmd{
 		Options: start.Options{
 			GlobalFlags: loftctlGlobalFlags,
diff --git a/go.mod b/go.mod
index 3454a1671..6c7a88e23 100644
--- a/go.mod
+++ b/go.mod
@@ -35,6 +35,7 @@ require (
 	github.com/rhysd/go-github-selfupdate v1.2.3
 	github.com/samber/lo v1.38.1
 	github.com/sirupsen/logrus v1.9.3
+	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
 	github.com/vmware-labs/yaml-jsonpath v0.3.2
@@ -100,7 +101,6 @@ require (
 	github.com/rivo/uniseg v0.4.6 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sergi/go-diff v1.3.1 // indirect
-	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 // indirect
diff --git a/pkg/cli/reset/password.go b/pkg/cli/reset/password.go
new file mode 100644
index 000000000..43f928014
--- /dev/null
+++ b/pkg/cli/reset/password.go
@@ -0,0 +1,180 @@
+package reset
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+	"strings"
+
+	storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1"
+	"github.com/loft-sh/api/v4/pkg/product"
+	"github.com/loft-sh/loftctl/v4/pkg/kube"
+	"github.com/loft-sh/loftctl/v4/pkg/random"
+	"github.com/loft-sh/log"
+	"github.com/loft-sh/log/survey"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+// PasswordCmd holds the flags
+type PasswordCmd struct {
+	*flags.GlobalFlags
+
+	User     string
+	Password string
+	Create   bool
+	Force    bool
+
+	Log log.Logger
+}
+
+// NewPasswordCmd creates a new command
+func NewPasswordCmd(globalFlags *flags.GlobalFlags) *cobra.Command {
+	cmd := &PasswordCmd{
+		GlobalFlags: globalFlags,
+		Log:         log.GetInstance(),
+	}
+	description := product.ReplaceWithHeader("reset password", `
+Resets the password of a user.
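+If the user does not exist, it can be created with the '--create' flag.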
+
+Example:
+loft reset password
+loft reset password --user admin
+#######################################################
+	`)
+	c := &cobra.Command{
+		Use:   "password",
+		Short: "Resets the password of a user",
+		Long:  description,
+		Args:  cobra.NoArgs,
+		RunE: func(_ *cobra.Command, _ []string) error {
+			return cmd.Run()
+		},
+	}
+
+	c.Flags().StringVar(&cmd.User, "user", "admin", "The name of the user to reset the password for")
+	c.Flags().StringVar(&cmd.Password, "password", "", "The new password to use")
+	c.Flags().BoolVar(&cmd.Create, "create", false, "Creates the user if it does not exist")
+	c.Flags().BoolVar(&cmd.Force, "force", false, "If true, creates a new password for the user if none exists")
+	return c
+}
+
+// Run executes the functionality
+func (cmd *PasswordCmd) Run() error {
+	restConfig, err := ctrl.GetConfig()
+	if err != nil {
+		return errors.Wrap(err, "get kube config")
+	}
+
+	managementClient, err := kube.NewForConfig(restConfig)
+	if err != nil {
+		return err
+	}
+
+	// get user
+	cmd.Log.Infof("Resetting password of user %s", cmd.User)
+	user, err := managementClient.Loft().StorageV1().Users().Get(context.Background(), cmd.User, metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return errors.Wrap(err, "get user")
+	} else if kerrors.IsNotFound(err) {
+		// create user
+		if !cmd.Create {
+			return fmt.Errorf("user %s was not found, run with '--create' to create this user automatically", cmd.User)
+		}
+
+		user, err = managementClient.Loft().StorageV1().Users().Create(context.Background(), &storagev1.User{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: cmd.User,
+			},
+			Spec: storagev1.UserSpec{
+				Username: cmd.User,
+				Subject:  cmd.User,
+				Groups: []string{
+					"system:masters",
+				},
+				PasswordRef: &storagev1.SecretRef{
+					SecretName:      "loft-password-" + random.RandomString(5),
+					SecretNamespace: "loft",
+					Key:             "password",
+				},
+			},
+		}, metav1.CreateOptions{})
+		if err != nil {
+			return err
+		}
+	}
+
+	// check if user had a password before
+	if user.Spec.PasswordRef == nil || user.Spec.PasswordRef.SecretName == "" || user.Spec.PasswordRef.SecretNamespace == "" || user.Spec.PasswordRef.Key == "" {
+		if !cmd.Force {
+			return fmt.Errorf("user %s had no password.
If you want to force password creation, please run with the '--force' flag", cmd.User) + } + + user.Spec.PasswordRef = &storagev1.SecretRef{ + SecretName: "loft-password-" + random.RandomString(5), + SecretNamespace: "loft", + Key: "password", + } + user, err = managementClient.Loft().StorageV1().Users().Update(context.Background(), user, metav1.UpdateOptions{}) + if err != nil { + return errors.Wrap(err, "update user") + } + } + + // now ask user for new password + password := cmd.Password + if password == "" { + for { + password, err = cmd.Log.Question(&survey.QuestionOptions{ + Question: "Please enter a new password", + IsPassword: true, + }) + password = strings.TrimSpace(password) + if err != nil { + return err + } else if password == "" { + cmd.Log.Error("Please enter a password") + continue + } + + break + } + } + passwordHash := []byte(fmt.Sprintf("%x", sha256.Sum256([]byte(password)))) + + // check if secret exists + passwordSecret, err := managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Get(context.Background(), user.Spec.PasswordRef.SecretName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } else if kerrors.IsNotFound(err) { + _, err = managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Create(context.Background(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: user.Spec.PasswordRef.SecretName, + Namespace: user.Spec.PasswordRef.SecretNamespace, + }, + Data: map[string][]byte{ + user.Spec.PasswordRef.Key: passwordHash, + }, + }, metav1.CreateOptions{}) + if err != nil { + return errors.Wrap(err, "create password secret") + } + } else { + if passwordSecret.Data == nil { + passwordSecret.Data = map[string][]byte{} + } + passwordSecret.Data[user.Spec.PasswordRef.Key] = passwordHash + _, err = managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Update(context.Background(), passwordSecret, metav1.UpdateOptions{}) + if err != nil { + return errors.Wrap(err, "update password secret") + } + } + + cmd.Log.Donef("Successfully reset password of user %s", cmd.User) + return nil +} diff --git a/pkg/cli/reset/reset.go b/pkg/cli/reset/reset.go new file mode 100644 index 000000000..bd0e980ee --- /dev/null +++ b/pkg/cli/reset/reset.go @@ -0,0 +1,21 @@ +package reset + +import ( + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/spf13/cobra" +) + +// NewResetCmd creates a new cobra command +func NewResetCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + description := product.ReplaceWithHeader("reset", "") + c := &cobra.Command{ + Use: "reset", + Short: "Reset configuration", + Long: description, + Args: cobra.NoArgs, + } + + c.AddCommand(NewPasswordCmd(globalFlags)) + return c +} diff --git a/pkg/cli/start/docker.go b/pkg/cli/start/docker.go new file mode 100644 index 000000000..85578565c --- /dev/null +++ b/pkg/cli/start/docker.go @@ -0,0 +1,420 @@ +package start + +import ( + "bytes" + "context" + "crypto/hmac" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "strings" + "time" + + "github.com/denisbrodbeck/machineid" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/log" + "github.com/loft-sh/log/hash" + "github.com/loft-sh/log/scanner" + "github.com/mgutz/ansi" + "github.com/mitchellh/go-homedir" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/util/wait" +) + +var ( + ErrMissingContainer = errors.New("missing container") + 
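// ErrLoftNotReachable is returned when the product endpoint cannot be reached over the network + 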
ErrLoftNotReachable = errors.New("product is not reachable")
+)
+
+type ContainerDetails struct {
+	NetworkSettings ContainerNetworkSettings `json:"NetworkSettings,omitempty"`
+	State           ContainerDetailsState    `json:"State,omitempty"`
+	ID              string                   `json:"ID,omitempty"`
+	Created         string                   `json:"Created,omitempty"`
+	Config          ContainerDetailsConfig   `json:"Config,omitempty"`
+}
+
+type ContainerNetworkSettings struct {
+	Ports map[string][]ContainerPort `json:"ports,omitempty"`
+}
+
+type ContainerPort struct {
+	HostIP   string `json:"HostIp,omitempty"`
+	HostPort string `json:"HostPort,omitempty"`
+}
+
+type ContainerDetailsConfig struct {
+	Labels map[string]string `json:"Labels,omitempty"`
+	Image  string            `json:"Image,omitempty"`
+	User   string            `json:"User,omitempty"`
+	Env    []string          `json:"Env,omitempty"`
+}
+
+type ContainerDetailsState struct {
+	Status    string `json:"Status,omitempty"`
+	StartedAt string `json:"StartedAt,omitempty"`
+}
+
+func (l *LoftStarter) startDocker(ctx context.Context, name string) error {
+	l.Log.Infof(product.Replace("Starting loft in Docker..."))
+
+	// prepare installation
+	err := l.prepareDocker()
+	if err != nil {
+		return err
+	}
+
+	// try to find loft container
+	containerID, err := l.findLoftContainer(ctx, name, true)
+	if err != nil {
+		return err
+	}
+
+	// check if container is there
+	if containerID != "" && (l.Reset || l.Upgrade) {
+		l.Log.Info(product.Replace("Existing Loft instance found."))
+		err = l.uninstallDocker(ctx, containerID)
+		if err != nil {
+			return err
+		}
+
+		containerID = ""
+	}
+
+	// Use default password if none is set
+	if l.Password == "" {
+		l.Password = getMachineUID(l.Log)
+	}
+
+	// check if it is already installed
+	if containerID != "" {
+		l.Log.Info(product.Replace("Existing Loft instance found. Run with --upgrade to apply new configuration"))
+		return l.successDocker(ctx, containerID)
+	}
+
+	// Install Loft
+	l.Log.Info(product.Replace("Welcome to Loft!"))
+	l.Log.Info(product.Replace("This installer will help you configure and deploy Loft."))
+
+	// make sure we are ready for installing
+	containerID, err = l.runLoftInDocker(ctx, name)
+	if err != nil {
+		return err
+	} else if containerID == "" {
+		return fmt.Errorf("%w: %s", ErrMissingContainer, product.Replace("couldn't find Loft container after starting it"))
+	}
+
+	return l.successDocker(ctx, containerID)
+}
+
+func (l *LoftStarter) successDocker(ctx context.Context, containerID string) error {
+	if l.NoWait {
+		return nil
+	}
+
+	// wait until Loft is ready
+	host, err := l.waitForLoftDocker(ctx, containerID)
+	if err != nil {
+		return err
+	}
+
+	// wait for domain to become reachable
+	l.Log.Infof(product.Replace("Wait for Loft to become available at %s..."), host)
+	err = wait.PollUntilContextTimeout(ctx, time.Second, time.Minute*10, true, func(ctx context.Context) (bool, error) {
+		containerDetails, err := l.inspectContainer(ctx, containerID)
+		if err != nil {
+			return false, fmt.Errorf("inspect loft container: %w", err)
+		} else if strings.ToLower(containerDetails.State.Status) == "exited" || strings.ToLower(containerDetails.State.Status) == "dead" {
+			logs, _ := l.logsContainer(ctx, containerID)
+			return false, fmt.Errorf("container failed (status: %s):\n %s", containerDetails.State.Status, logs)
+		}
+
+		return clihelper.IsLoftReachable(ctx, host)
+	})
+	if err != nil {
+		return fmt.Errorf(product.Replace("error waiting for loft: %w"), err)
+	}
+
+	// print success message
+	PrintSuccessMessageDockerInstall(host, l.Password, l.Log)
+	return nil
+}
+
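+// PrintSuccessMessageDockerInstall prints the admin credentials and the
+// URL under which the freshly installed instance can be reached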
+func PrintSuccessMessageDockerInstall(host, password string, log log.Logger) {
+	url := "https://" + host
+	log.WriteString(logrus.InfoLevel, fmt.Sprintf(product.Replace(`
+
+
+########################## LOGIN ############################
+
+Username: `+ansi.Color("admin", "green+b")+`
+Password: `+ansi.Color(password, "green+b")+`
+
+Login via UI: %s
+Login via CLI: %s
+
+#################################################################
+
+Loft was successfully installed and can now be reached at: %s
+
+Thanks for using Loft!
+`),
+		ansi.Color(url, "green+b"),
+		ansi.Color(product.LoginCmd()+" "+url, "green+b"),
+		url,
+	))
+}
+
+func (l *LoftStarter) waitForLoftDocker(ctx context.Context, containerID string) (string, error) {
+	l.Log.Info(product.Replace("Wait for Loft to become available..."))
+
+	// check for local port
+	containerDetails, err := l.inspectContainer(ctx, containerID)
+	if err != nil {
+		return "", err
+	} else if len(containerDetails.NetworkSettings.Ports) > 0 && len(containerDetails.NetworkSettings.Ports["10443/tcp"]) > 0 {
+		return "localhost:" + containerDetails.NetworkSettings.Ports["10443/tcp"][0].HostPort, nil
+	}
+
+	// check if no tunnel
+	if l.NoTunnel {
+		return "", fmt.Errorf("%w: %s", ErrLoftNotReachable, product.Replace("cannot connect to Loft as it has no exposed port and --no-tunnel is enabled"))
+	}
+
+	// wait for router
+	url := ""
+	waitErr := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute*10, true, func(ctx context.Context) (bool, error) {
+		url, err = l.findLoftRouter(ctx, containerID)
+		if err != nil {
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if waitErr != nil {
+		return "", fmt.Errorf("error waiting for loft router domain: %w", waitErr)
+	}
+
+	return url, nil
+}
+
+func (l *LoftStarter) findLoftRouter(ctx context.Context, id string) (string, error) {
+	out, err := l.buildDockerCmd(ctx, "exec", id, "cat", "/var/lib/loft/loft-domain.txt").Output()
+	if err != nil {
+		return "", WrapCommandError(out, err)
+	}
+
+	return strings.TrimSpace(string(out)), nil
+}
+
+func (l *LoftStarter) prepareDocker() error {
+	// test for docker
+	_, err := exec.LookPath("docker")
+	if err != nil {
+		return fmt.Errorf("seems like docker is not installed. Docker is required for the installation of loft.
Please visit https://docs.docker.com/engine/install/ for install instructions") + } + + output, err := exec.Command("docker", "ps").CombinedOutput() + if err != nil { + return fmt.Errorf("seems like there are issues with your docker cli: \n\n%s", output) + } + + return nil +} + +func (l *LoftStarter) uninstallDocker(ctx context.Context, id string) error { + l.Log.Infof(product.Replace("Uninstalling loft...")) + + // stop container + out, err := l.buildDockerCmd(ctx, "stop", id).Output() + if err != nil { + return fmt.Errorf("stop container: %w", WrapCommandError(out, err)) + } + + // remove container + out, err = l.buildDockerCmd(ctx, "rm", id).Output() + if err != nil { + return fmt.Errorf("remove container: %w", WrapCommandError(out, err)) + } + + return nil +} + +func (l *LoftStarter) runLoftInDocker(ctx context.Context, name string) (string, error) { + args := []string{"run", "-d", "--name", name} + if l.NoTunnel { + args = append(args, "--env", "DISABLE_LOFT_ROUTER=true") + } + if l.Password != "" { + args = append(args, "--env", "ADMIN_PASSWORD_HASH="+hash.String(l.Password)) + } + + // run as root otherwise we get permission errors + args = append(args, "-u", "root") + + // mount the loft lib + args = append(args, "-v", "loft-data:/var/lib/loft") + + // set port + if l.LocalPort != "" { + args = append(args, "-p", l.LocalPort+":10443") + } + + // set extra args + args = append(args, l.DockerArgs...) + + // set image + if l.DockerImage != "" { + args = append(args, l.DockerImage) + } else if l.Version != "" { + args = append(args, "ghcr.io/loft-sh/loft:"+strings.TrimPrefix(l.Version, "v")) + } else { + args = append(args, "ghcr.io/loft-sh/loft:latest") + } + + l.Log.Infof("Start Loft via 'docker %s'", strings.Join(args, " ")) + runCmd := l.buildDockerCmd(ctx, args...) 
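+ // attach the docker CLI's own output directly to the user's terminal so
+ // that image pull and container start progress stays visible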
+ runCmd.Stdout = os.Stdout
+ runCmd.Stderr = os.Stderr
+ err := runCmd.Run()
+ if err != nil {
+ return "", err
+ }
+
+ return l.findLoftContainer(ctx, name, false)
+}
+
+func (l *LoftStarter) logsContainer(ctx context.Context, id string) (string, error) {
+ args := []string{"logs", id}
+ out, err := l.buildDockerCmd(ctx, args...).CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("logs container: %w", WrapCommandError(out, err))
+ }
+
+ return string(out), nil
+}
+
+func (l *LoftStarter) inspectContainer(ctx context.Context, id string) (*ContainerDetails, error) {
+ args := []string{"inspect", "--type", "container", id}
+ out, err := l.buildDockerCmd(ctx, args...).Output()
+ if err != nil {
+ return nil, fmt.Errorf("inspect container: %w", WrapCommandError(out, err))
+ }
+
+ containerDetails := []*ContainerDetails{}
+ err = json.Unmarshal(out, &containerDetails)
+ if err != nil {
+ return nil, fmt.Errorf("parse inspect output: %w", err)
+ } else if len(containerDetails) == 0 {
+ return nil, fmt.Errorf("couldn't find container %s", id)
+ }
+
+ return containerDetails[0], nil
+}
+
+func (l *LoftStarter) removeContainer(ctx context.Context, id string) error {
+ args := []string{"rm", id}
+ out, err := l.buildDockerCmd(ctx, args...).Output()
+ if err != nil {
+ return fmt.Errorf("remove container: %w", WrapCommandError(out, err))
+ }
+
+ return nil
+}
+
+func (l *LoftStarter) findLoftContainer(ctx context.Context, name string, onlyRunning bool) (string, error) {
+ args := []string{"ps", "-q", "-a", "-f", "name=^" + name + "$"}
+ out, err := l.buildDockerCmd(ctx, args...).Output()
+ if err != nil {
+ return "", fmt.Errorf("error finding container: %w", WrapCommandError(out, err))
+ }
+
+ arr := []string{}
+ scan := scanner.NewScanner(bytes.NewReader(out))
+ for scan.Scan() {
+ arr = append(arr, strings.TrimSpace(scan.Text()))
+ }
+ if len(arr) == 0 {
+ return "", nil
+ }
+
+ // remove the failed / exited containers
+ runningContainerID := ""
+ for _, containerID := range arr {
+ containerState, err := l.inspectContainer(ctx, containerID)
+ if err != nil {
+ return "", err
+ } else if onlyRunning && strings.ToLower(containerState.State.Status) != "running" {
+ err = l.removeContainer(ctx, containerID)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ runningContainerID = containerID
+ }
+ }
+
+ return runningContainerID, nil
+}
+
+func (l *LoftStarter) buildDockerCmd(ctx context.Context, args ...string) *exec.Cmd {
+ cmd := exec.CommandContext(ctx, "docker", args...) 
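+ // the command is not started here; callers choose how to run it (Output,
+ // CombinedOutput or Run with redirected stdio), and cancelling ctx
+ // terminates a hanging docker binary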
+ return cmd +} + +func WrapCommandError(stdout []byte, err error) error { + if err == nil { + return nil + } + + return &Error{ + stdout: stdout, + err: err, + } +} + +type Error struct { + err error + stdout []byte +} + +func (e *Error) Error() string { + message := "" + if len(e.stdout) > 0 { + message += string(e.stdout) + "\n" + } + + var exitError *exec.ExitError + if errors.As(e.err, &exitError) && len(exitError.Stderr) > 0 { + message += string(exitError.Stderr) + "\n" + } + + return message + e.err.Error() +} + +func getMachineUID(log log.Logger) string { + id, err := machineid.ID() + if err != nil { + id = "error" + if log != nil { + log.Debugf("Error retrieving machine uid: %v", err) + } + } + // get $HOME to distinguish two users on the same machine + // will be hashed later together with the ID + home, err := homedir.Dir() + if err != nil { + home = "error" + if log != nil { + log.Debugf("Error retrieving machine home: %v", err) + } + } + mac := hmac.New(sha256.New, []byte(id)) + mac.Write([]byte(home)) + return fmt.Sprintf("%x", mac.Sum(nil)) +} diff --git a/pkg/cli/start/login.go b/pkg/cli/start/login.go new file mode 100644 index 000000000..e11f52a0c --- /dev/null +++ b/pkg/cli/start/login.go @@ -0,0 +1,121 @@ +package start + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + netUrl "net/url" + "strings" + + types "github.com/loft-sh/api/v4/pkg/auth" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/mgutz/ansi" + "github.com/sirupsen/logrus" + "github.com/skratchdot/open-golang/open" +) + +const defaultUser = "admin" + +func (l *LoftStarter) login(url string) error { + if !strings.HasPrefix(url, "https://") { + url = "https://" + url + } + + // check if we are already logged in + if l.isLoggedIn(url) { + // still open the UI + err := open.Run(url) + if err != nil { + return fmt.Errorf("couldn't open the login page in a browser: %w", err) + } + + return nil + } + + // log into the CLI + err := l.loginViaCLI(url) + if err != nil { + return err + } + + // log into the UI + err = l.loginUI(url) + if err != nil { + return err + } + + return nil +} + +func (l *LoftStarter) loginViaCLI(url string) error { + loginPath := "%s/auth/password/login" + + loginRequest := types.PasswordLoginRequest{ + Username: defaultUser, + Password: l.Password, + } + + loginRequestBytes, err := json.Marshal(loginRequest) + if err != nil { + return err + } + + loginRequestBuf := bytes.NewBuffer(loginRequestBytes) + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + httpClient := &http.Client{Transport: tr} + + resp, err := httpClient.Post(fmt.Sprintf(loginPath, url), "application/json", loginRequestBuf) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + accessKey := &types.AccessKey{} + err = json.Unmarshal(body, accessKey) + if err != nil { + return err + } + + // log into loft + loader, err := client.NewClientFromPath(l.Config) + if err != nil { + return err + } + + url = strings.TrimSuffix(url, "/") + err = loader.LoginWithAccessKey(url, accessKey.AccessKey, true) + if err != nil { + return err + } + + l.Log.WriteString(logrus.InfoLevel, "\n") + l.Log.Donef(product.Replace("Successfully logged in via CLI into Loft instance %s"), ansi.Color(url, "white+b")) + + return nil +} + +func (l *LoftStarter) loginUI(url string) error { + queryString := fmt.Sprintf("username=%s&password=%s", defaultUser, 
netUrl.QueryEscape(l.Password)) + loginURL := fmt.Sprintf("%s/login#%s", url, queryString) + + err := open.Run(loginURL) + if err != nil { + return fmt.Errorf("couldn't open the login page in a browser: %w", err) + } + + l.Log.Infof("If the browser does not open automatically, please navigate to %s", loginURL) + + return nil +} diff --git a/pkg/cli/start/port_forwarding.go b/pkg/cli/start/port_forwarding.go new file mode 100644 index 000000000..9c35d91fe --- /dev/null +++ b/pkg/cli/start/port_forwarding.go @@ -0,0 +1,68 @@ +package start + +import ( + "context" + "net/http" + "time" + + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/httputil" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +func (l *LoftStarter) startPortForwarding(ctx context.Context, loftPod *corev1.Pod) error { + stopChan, err := clihelper.StartPortForwarding(ctx, l.RestConfig, l.KubeClient, loftPod, l.LocalPort, l.Log) + if err != nil { + return err + } + go l.restartPortForwarding(ctx, stopChan) + + // wait until loft is reachable at the given url + httpClient := &http.Client{ + Transport: httputil.InsecureTransport(), + } + l.Log.Infof(product.Replace("Waiting until loft is reachable at https://localhost:%s"), l.LocalPort) + err = wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://localhost:"+l.LocalPort+"/version", nil) + if err != nil { + return false, nil + } + + resp, err := httpClient.Do(req) + if err != nil { + return false, nil + } + + return resp.StatusCode == http.StatusOK, nil + }) + if err != nil { + return err + } + + return nil +} + +func (l *LoftStarter) restartPortForwarding(ctx context.Context, stopChan chan struct{}) { + for { + <-stopChan + l.Log.Info("Restart port forwarding") + + // wait for loft pod to start + l.Log.Info(product.Replace("Waiting until loft pod has been started...")) + loftPod, err := clihelper.WaitForReadyLoftPod(ctx, l.KubeClient, l.Namespace, l.Log) + if err != nil { + l.Log.Fatalf(product.Replace("Error waiting for ready loft pod: %v"), err) + } + + // restart port forwarding + stopChan, err = clihelper.StartPortForwarding(ctx, l.RestConfig, l.KubeClient, loftPod, l.LocalPort, l.Log) + if err != nil { + l.Log.Fatalf("Error starting port forwarding: %v", err) + } + + l.Log.Donef("Successfully restarted port forwarding") + } +} diff --git a/pkg/cli/start/start.go b/pkg/cli/start/start.go new file mode 100644 index 000000000..4b54cb050 --- /dev/null +++ b/pkg/cli/start/start.go @@ -0,0 +1,305 @@ +package start + +import ( + "context" + "fmt" + "os" + "os/exec" + + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/util/term" +) + +// Options holds the cmd flags +type Options struct { + *flags.GlobalFlags + // Will be filled later + KubeClient kubernetes.Interface + Log log.Logger + RestConfig *rest.Config + Context string + Values string + LocalPort string + Version string + DockerImage string + 
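// note: Namespace only applies to the Kubernetes install path; the
+ // Docker install path ignores it
+ 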
Namespace string
+ Password string
+ Host string
+ Email string
+ ChartRepo string
+ Product string
+ ChartName string
+ ChartPath string
+ DockerArgs []string
+ Reset bool
+ NoPortForwarding bool
+ NoTunnel bool
+ NoLogin bool
+ NoWait bool
+ Upgrade bool
+ ReuseValues bool
+ Docker bool
+}
+
+func NewLoftStarter(options Options) *LoftStarter {
+ return &LoftStarter{
+ Options: options,
+ }
+}
+
+type LoftStarter struct {
+ Options
+}
+
+// Start executes the functionality "loft start"
+func (l *LoftStarter) Start(ctx context.Context) error {
+ // start in Docker?
+ if l.Docker {
+ return l.startDocker(ctx, "loft")
+ }
+
+ // only set local port by default in kubernetes installation
+ if l.LocalPort == "" {
+ l.LocalPort = "9898"
+ }
+
+ err := l.prepare()
+ if err != nil {
+ return err
+ }
+ l.Log.WriteString(logrus.InfoLevel, "\n")
+
+ // Uninstall already existing Loft instance
+ if l.Reset {
+ err = clihelper.UninstallLoft(ctx, l.KubeClient, l.RestConfig, l.Context, l.Namespace, l.Log)
+ if err != nil {
+ return err
+ }
+ }
+
+ // Is already installed?
+ isInstalled, err := clihelper.IsLoftAlreadyInstalled(ctx, l.KubeClient, l.Namespace)
+ if err != nil {
+ return err
+ }
+
+ // Use default password if none is set
+ if l.Password == "" {
+ defaultPassword, err := clihelper.GetLoftDefaultPassword(ctx, l.KubeClient, l.Namespace)
+ if err != nil {
+ return err
+ }
+
+ l.Password = defaultPassword
+ }
+
+ // Upgrade Loft if already installed
+ if isInstalled {
+ return l.handleAlreadyExistingInstallation(ctx)
+ }
+
+ // Install Loft
+ l.Log.Info(product.Replace("Welcome to Loft!"))
+ l.Log.Info(product.Replace("This installer will help you configure and deploy Loft."))
+
+ // make sure we are ready for installing
+ err = l.prepareInstall(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = l.upgradeLoft()
+ if err != nil {
+ return err
+ }
+
+ return l.success(ctx)
+}
+
+func (l *LoftStarter) prepareInstall(ctx context.Context) error {
+ // delete admin user & secret
+ return clihelper.UninstallLoft(ctx, l.KubeClient, l.RestConfig, l.Context, l.Namespace, log.Discard)
+}
+
+func (l *LoftStarter) prepare() error {
+ loader, err := client.NewClientFromPath(l.Config)
+ if err != nil {
+ return err
+ }
+ loftConfig := loader.Config()
+
+ // first load the kube config
+ kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{})
+
+ // load the raw config
+ kubeConfig, err := kubeClientConfig.RawConfig()
+ if err != nil {
+ return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err)
+ }
+
+ // we switch the context to the install config
+ contextToLoad := kubeConfig.CurrentContext
+ if l.Context != "" {
+ contextToLoad = l.Context
+ } else if loftConfig.LastInstallContext != "" && loftConfig.LastInstallContext != contextToLoad {
+ contextToLoad, err = l.Log.Question(&survey.QuestionOptions{
+ Question: product.Replace("Seems like you're trying to use 'loft start' with a different kubernetes context than before. 
Please choose which kubernetes context you want to use"),
+ DefaultValue: contextToLoad,
+ Options: []string{contextToLoad, loftConfig.LastInstallContext},
+ })
+ if err != nil {
+ return err
+ }
+ }
+ l.Context = contextToLoad
+
+ loftConfig.LastInstallContext = contextToLoad
+ _ = loader.Save()
+
+ // kube client config
+ kubeClientConfig = clientcmd.NewNonInteractiveClientConfig(kubeConfig, contextToLoad, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules())
+
+ // test for helm and kubectl
+ _, err = exec.LookPath("helm")
+ if err != nil {
+ return fmt.Errorf("seems like helm is not installed. Helm is required for the installation of loft. Please visit https://helm.sh/docs/intro/install/ for install instructions")
+ }
+
+ output, err := exec.Command("helm", "version").CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("seems like there are issues with your helm client: \n\n%s", output)
+ }
+
+ _, err = exec.LookPath("kubectl")
+ if err != nil {
+ return fmt.Errorf("seems like kubectl is not installed. Kubectl is required for the installation of loft. Please visit https://kubernetes.io/docs/tasks/tools/install-kubectl/ for install instructions")
+ }
+
+ output, err = exec.Command("kubectl", "version", "--context", contextToLoad).CombinedOutput()
+ if err != nil {
+ return fmt.Errorf("seems like kubectl cannot connect to your Kubernetes cluster: \n\n%s", output)
+ }
+
+ l.RestConfig, err = kubeClientConfig.ClientConfig()
+ if err != nil {
+ return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err)
+ }
+ l.KubeClient, err = kubernetes.NewForConfig(l.RestConfig)
+ if err != nil {
+ return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err)
+ }
+
+ // Check if cluster has RBAC correctly configured
+ _, err = l.KubeClient.RbacV1().ClusterRoles().Get(context.Background(), "cluster-admin", metav1.GetOptions{})
+ if err != nil {
+ return fmt.Errorf("error retrieving cluster role 'cluster-admin': %w. Please make sure RBAC is correctly configured in your cluster", err)
+ }
+
+ return nil
+}
+
+func (l *LoftStarter) handleAlreadyExistingInstallation(ctx context.Context) error {
+ enableIngress := false
+
+ // Only ask if ingress should be enabled if --upgrade flag is not provided
+ if !l.Upgrade && term.IsTerminal(os.Stdin) {
+ l.Log.Info(product.Replace("Existing Loft instance found."))
+
+ // Check if Loft is installed in a local cluster
+ isLocal := clihelper.IsLoftInstalledLocally(ctx, l.KubeClient, l.Namespace)
+
+ // Skip question if --host flag is provided
+ if l.Host != "" {
+ enableIngress = true
+ }
+
+ if enableIngress {
+ if isLocal {
+ // Confirm with user if this is a local cluster
+ const (
+ YesOption = "Yes"
+ NoOption = "No, my cluster is not running locally (GKE, EKS, Bare Metal, etc.)"
+ )
+
+ answer, err := l.Log.Question(&survey.QuestionOptions{
+ Question: "Seems like your cluster is running locally (docker desktop, minikube, kind etc.). 
Is that correct?", + DefaultValue: YesOption, + Options: []string{ + YesOption, + NoOption, + }, + }) + if err != nil { + return err + } + + isLocal = answer == YesOption + } + + if isLocal { + // Confirm with user if ingress should be installed in local cluster + var ( + YesOption = product.Replace("Yes, enable the ingress for Loft anyway") + NoOption = "No" + ) + + answer, err := l.Log.Question(&survey.QuestionOptions{ + Question: product.Replace("Enabling ingress is usually only useful for remote clusters. Do you still want to deploy the ingress for Loft to your local cluster?"), + DefaultValue: NoOption, + Options: []string{ + NoOption, + YesOption, + }, + }) + if err != nil { + return err + } + + enableIngress = answer == YesOption + } + } + + // Check if we need to enable ingress + if enableIngress { + // Ask for hostname if --host flag is not provided + if l.Host == "" { + host, err := clihelper.EnterHostNameQuestion(l.Log) + if err != nil { + return err + } + + l.Host = host + } else { + l.Log.Info(product.Replace("Will enable Loft ingress with hostname: ") + l.Host) + } + + if term.IsTerminal(os.Stdin) { + err := clihelper.EnsureIngressController(ctx, l.KubeClient, l.Context, l.Log) + if err != nil { + return errors.Wrap(err, "install ingress controller") + } + } + } + } + + // Only upgrade if --upgrade flag is present or user decided to enable ingress + if l.Upgrade || enableIngress { + err := l.upgradeLoft() + if err != nil { + return err + } + } + + return l.success(ctx) +} diff --git a/pkg/cli/start/success.go b/pkg/cli/start/success.go new file mode 100644 index 000000000..488b4ce43 --- /dev/null +++ b/pkg/cli/start/success.go @@ -0,0 +1,247 @@ +package start + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "strings" + "time" + + "github.com/loft-sh/admin-apis/pkg/licenseapi" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/printhelper" + "github.com/loft-sh/log/survey" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +func (l *LoftStarter) success(ctx context.Context) error { + if l.NoWait { + return nil + } + + // wait until Loft is ready + loftPod, err := l.waitForLoft(ctx) + if err != nil { + return err + } + + if l.NoPortForwarding { + return nil + } + + // check if Loft was installed locally + isLocal := clihelper.IsLoftInstalledLocally(ctx, l.KubeClient, l.Namespace) + if isLocal { + // check if loft domain secret is there + if !l.NoTunnel { + loftRouterDomain, err := l.pingLoftRouter(ctx, loftPod) + if err != nil { + l.Log.Errorf("Error retrieving loft router domain: %v", err) + l.Log.Info("Fallback to use port-forwarding") + } else if loftRouterDomain != "" { + return l.successLoftRouter(loftRouterDomain) + } + } + + // start port-forwarding + err = l.startPortForwarding(ctx, loftPod) + if err != nil { + return err + } + + return l.successLocal() + } + + // get login link + l.Log.Info("Checking Loft status...") + host, err := clihelper.GetLoftIngressHost(ctx, l.KubeClient, l.Namespace) + if err != nil { + return err + } + + // check if loft is reachable + reachable, err := clihelper.IsLoftReachable(ctx, host) + if !reachable || err != nil { + const ( + YesOption = "Yes" + NoOption = "No, please re-run the DNS check" + ) + + answer, err := 
l.Log.Question(&survey.QuestionOptions{ + Question: "Unable to reach Loft at https://" + host + ". Do you want to start port-forwarding instead?", + DefaultValue: YesOption, + Options: []string{ + YesOption, + NoOption, + }, + }) + if err != nil { + return err + } + + if answer == YesOption { + err = l.startPortForwarding(ctx, loftPod) + if err != nil { + return err + } + + return l.successLocal() + } + } + + return l.successRemote(ctx, host) +} + +func (l *LoftStarter) pingLoftRouter(ctx context.Context, loftPod *corev1.Pod) (string, error) { + loftRouterSecret, err := l.KubeClient.CoreV1().Secrets(loftPod.Namespace).Get(ctx, clihelper.LoftRouterDomainSecret, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", nil + } + + return "", fmt.Errorf("find loft router domain secret: %w", err) + } else if loftRouterSecret.Data == nil || len(loftRouterSecret.Data["domain"]) == 0 { + return "", nil + } + + // get the domain from secret + loftRouterDomain := string(loftRouterSecret.Data["domain"]) + + // wait until loft is reachable at the given url + httpClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + l.Log.Infof(product.Replace("Waiting until loft is reachable at https://%s"), loftRouterDomain) + err = wait.PollUntilContextTimeout(ctx, time.Second*3, time.Minute*5, true, func(ctx context.Context) (bool, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://"+loftRouterDomain+"/version", nil) + if err != nil { + return false, nil + } + + resp, err := httpClient.Do(req) + if err != nil { + return false, nil + } + + return resp.StatusCode == http.StatusOK, nil + }) + if err != nil { + return "", err + } + + return loftRouterDomain, nil +} + +func (l *LoftStarter) successLoftRouter(url string) error { + if !l.NoLogin { + err := l.login(url) + if err != nil { + return err + } + } + + printhelper.PrintSuccessMessageLoftRouterInstall(url, l.Password, l.Log) + l.printVClusterProGettingStarted(url) + return nil +} + +func (l *LoftStarter) successLocal() error { + url := "https://localhost:" + l.LocalPort + + if !l.NoLogin { + err := l.login(url) + if err != nil { + return err + } + } + + printhelper.PrintSuccessMessageLocalInstall(l.Password, url, l.Log) + l.printVClusterProGettingStarted(url) + + blockChan := make(chan bool) + <-blockChan + return nil +} + +func (l *LoftStarter) isLoggedIn(url string) bool { + url = strings.TrimPrefix(url, "https://") + + c, err := client.NewClientFromPath(l.Config) + return err == nil && strings.TrimPrefix(strings.TrimSuffix(c.Config().Host, "/"), "https://") == strings.TrimSuffix(url, "/") +} + +func (l *LoftStarter) successRemote(ctx context.Context, host string) error { + ready, err := clihelper.IsLoftReachable(ctx, host) + if err != nil { + return err + } else if ready { + printhelper.PrintSuccessMessageRemoteInstall(host, l.Password, l.Log) + return nil + } + + // Print DNS Configuration + printhelper.PrintDNSConfiguration(host, l.Log) + + l.Log.Info("Waiting for you to configure DNS, so loft can be reached on https://" + host) + err = wait.PollUntilContextTimeout(ctx, 5*time.Second, config.Timeout(), true, func(ctx context.Context) (done bool, err error) { + return clihelper.IsLoftReachable(ctx, host) + }) + if err != nil { + return err + } + + l.Log.Done(product.Replace("Loft is reachable at https://") + host) + printhelper.PrintSuccessMessageRemoteInstall(host, l.Password, l.Log) + return nil +} + +func (l *LoftStarter) 
printVClusterProGettingStarted(url string) {
+ if product.Name() != licenseapi.VClusterPro {
+ return
+ }
+
+ if l.isLoggedIn(url) {
+ l.Log.Donef("You are successfully logged into vCluster Platform!")
+ l.Log.WriteString(logrus.InfoLevel, "- Use `vcluster create` to create a new pro vCluster\n")
+ l.Log.WriteString(logrus.InfoLevel, "- Use `vcluster create --disable-pro` to create a new oss vCluster\n")
+ l.Log.WriteString(logrus.InfoLevel, "- Use `vcluster import` to import and upgrade an existing oss vCluster\n")
+ } else {
+ l.Log.Warnf("You are not logged into vCluster Platform yet, please run the command below to log into your vCluster Platform instance")
+ l.Log.WriteString(logrus.InfoLevel, "- Use `vcluster login "+url+"` to log into the vCluster Platform instance\n")
+ }
+}
+
+func (l *LoftStarter) waitForLoft(ctx context.Context) (*corev1.Pod, error) {
+ // wait for loft pod to start
+ l.Log.Info(product.Replace("Waiting for Loft pod to be running..."))
+ loftPod, err := clihelper.WaitForReadyLoftPod(ctx, l.KubeClient, l.Namespace, l.Log)
+ if err != nil {
+ return nil, err
+ }
+ l.Log.Donef(product.Replace("Loft pod successfully started"))
+
+ // ensure user admin secret is there
+ isNewPassword, err := clihelper.EnsureAdminPassword(ctx, l.KubeClient, l.RestConfig, l.Password, l.Log)
+ if err != nil {
+ return nil, err
+ }
+
+ // If password is different than expected
+ if isNewPassword {
+ l.Password = ""
+ }
+
+ return loftPod, nil
+}
diff --git a/pkg/cli/start/upgrade.go b/pkg/cli/start/upgrade.go
new file mode 100644
index 000000000..c85065344
--- /dev/null
+++ b/pkg/cli/start/upgrade.go
@@ -0,0 +1,88 @@
+package start
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/loft-sh/api/v4/pkg/product"
+ "github.com/loft-sh/loftctl/v4/pkg/clihelper"
+ "github.com/mgutz/ansi"
+ "github.com/pkg/errors"
+)
+
+func (l *LoftStarter) upgradeLoft() error {
+ extraArgs := []string{}
+ if l.NoTunnel {
+ extraArgs = append(extraArgs, "--set-string", "env.DISABLE_LOFT_ROUTER=true")
+ }
+ if l.Password != "" {
+ extraArgs = append(extraArgs, "--set", "admin.password="+l.Password)
+ }
+ if l.Host != "" {
+ extraArgs = append(extraArgs, "--set", "ingress.enabled=true", "--set", "ingress.host="+l.Host)
+ }
+ if l.Version != "" {
+ extraArgs = append(extraArgs, "--version", l.Version)
+ }
+ if l.Product != "" {
+ extraArgs = append(extraArgs, "--set", "product="+l.Product)
+ }
+
+ // Do not use --reuse-values if --reset flag is provided because this should be a new install and it will cause issues with `helm template`
+ if !l.Reset && l.ReuseValues {
+ extraArgs = append(extraArgs, "--reuse-values")
+ }
+
+ if l.Values != "" {
+ absValuesPath, err := filepath.Abs(l.Values)
+ if err != nil {
+ return err
+ }
+ extraArgs = append(extraArgs, "--values", absValuesPath)
+ }
+
+ chartName := l.ChartPath
+ chartRepo := ""
+ if chartName == "" {
+ chartName = l.ChartName
+ chartRepo = l.ChartRepo
+ }
+
+ err := clihelper.UpgradeLoft(chartName, chartRepo, l.Context, l.Namespace, extraArgs, l.Log)
+ if err != nil {
+ if !l.Reset {
+ return errors.New(err.Error() + product.Replace(fmt.Sprintf("\n\nIf you want to purge and reinstall Loft, run: %s\n", ansi.Color("loft start --reset", "green+b"))))
+ }
+
+ // Try to purge Loft and retry install
+ l.Log.Info(product.Replace("Trying to delete objects blocking Loft installation"))
+
+ manifests, err := clihelper.GetLoftManifests(chartName, chartRepo, l.Context, l.Namespace, extraArgs, l.Log)
+ if err != nil {
+ return err
+ }
+
+ 
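// recovery sketch: the rendered manifests are piped into
+ // "kubectl delete -f - --ignore-not-found=true --grace-period=0 --force"
+ // to force-remove leftover objects that block the new install
+ 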
kubectlDelete := exec.Command("kubectl", "delete", "-f", "-", "--ignore-not-found=true", "--grace-period=0", "--force") + + buffer := bytes.Buffer{} + buffer.Write([]byte(manifests)) + + kubectlDelete.Stdin = &buffer + kubectlDelete.Stdout = os.Stdout + kubectlDelete.Stderr = os.Stderr + + // Ignoring potential errors here + _ = kubectlDelete.Run() + + // Retry Loft installation + err = clihelper.UpgradeLoft(chartName, chartRepo, l.Context, l.Namespace, extraArgs, l.Log) + if err != nil { + return errors.New(err.Error() + product.Replace(fmt.Sprintf("\n\nLoft installation failed. Reach out to get help:\n- via Slack: %s (fastest option)\n- via Online Chat: %s\n- via Email: %s\n", ansi.Color("https://slack.loft.sh/", "green+b"), ansi.Color("https://loft.sh/", "green+b"), ansi.Color("support@loft.sh", "green+b")))) + } + } + + return nil +} diff --git a/pkg/platform/clihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go new file mode 100644 index 000000000..254e7ce79 --- /dev/null +++ b/pkg/platform/clihelper/clihelper.go @@ -0,0 +1,773 @@ +package clihelper + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "sort" + "strconv" + "strings" + "time" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/httputil" + "github.com/sirupsen/logrus" + + jsonpatch "github.com/evanphx/json-patch" + loftclientset "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/portforward" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" +) + +// CriticalStatus container status +var CriticalStatus = map[string]bool{ + "Error": true, + "Unknown": true, + "ImagePullBackOff": true, + "CrashLoopBackOff": true, + "RunContainerError": true, + "ErrImagePull": true, + "CreateContainerConfigError": true, + "InvalidImageName": true, +} + +const defaultReleaseName = "loft" + +const LoftRouterDomainSecret = "loft-router-domain" + +var defaultDeploymentName = "loft" + +func GetDisplayName(name string, displayName string) string { + if displayName != "" { + return displayName + } + + return name +} + +func GetTableDisplayName(name string, displayName string) string { + if displayName != "" && displayName != name { + return displayName + " (" + name + ")" + } + + return name +} + +func DisplayName(entityInfo *clusterv1.EntityInfo) string { + if entityInfo == nil { + return "" + } else if entityInfo.DisplayName != "" { + return entityInfo.DisplayName + } else if entityInfo.Username != "" { + return entityInfo.Username + } + + return entityInfo.Name +} + +func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", 
metav1.GetOptions{})
+ if err != nil {
+ return "", err
+ }
+ // find host
+ if len(ingress.Spec.Rules) > 0 {
+ return ingress.Spec.Rules[0].Host, nil
+ }
+ } else {
+ // find host
+ if len(ingress.Spec.Rules) > 0 {
+ return ingress.Spec.Rules[0].Host, nil
+ }
+ }
+
+ return "", fmt.Errorf("couldn't find any host in loft ingress '%s/loft-ingress', please make sure you have not changed any deployed resources", namespace)
+}
+
+func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, namespace string, log log.Logger) (*corev1.Pod, error) {
+ // wait until we have a running loft pod
+ now := time.Now()
+ pod := &corev1.Pod{}
+ err := wait.PollUntilContextTimeout(ctx, time.Second*2, config.Timeout(), true, func(ctx context.Context) (bool, error) {
+ pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
+ LabelSelector: "app=loft",
+ })
+ if err != nil {
+ log.Warnf("Error trying to retrieve %s pod: %v", product.DisplayName(), err)
+ return false, nil
+ } else if len(pods.Items) == 0 {
+ if time.Now().After(now.Add(time.Second * 10)) {
+ log.Infof("Still waiting for a %s pod...", product.DisplayName())
+ now = time.Now()
+ }
+ return false, nil
+ }
+
+ sort.Slice(pods.Items, func(i, j int) bool {
+ return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time)
+ })
+
+ loftPod := &pods.Items[0]
+ found := false
+ for _, containerStatus := range loftPod.Status.ContainerStatuses {
+ if containerStatus.State.Running != nil && containerStatus.Ready {
+ if containerStatus.Name == "manager" {
+ found = true
+ }
+
+ continue
+ } else if containerStatus.State.Terminated != nil || (containerStatus.State.Waiting != nil && CriticalStatus[containerStatus.State.Waiting.Reason]) {
+ reason := ""
+ message := ""
+ if containerStatus.State.Terminated != nil {
+ reason = containerStatus.State.Terminated.Reason
+ message = containerStatus.State.Terminated.Message
+ } else if containerStatus.State.Waiting != nil {
+ reason = containerStatus.State.Waiting.Reason
+ message = containerStatus.State.Waiting.Message
+ }
+
+ out, err := kubeClient.CoreV1().Pods(namespace).GetLogs(loftPod.Name, &corev1.PodLogOptions{
+ Container: "manager",
+ }).Do(ctx).Raw()
+ if err != nil {
+ return false, fmt.Errorf("there seems to be an issue with %s starting up: %s (%s). Please reach out to our support at https://loft.sh/", product.DisplayName(), message, reason)
+ }
+ if strings.Contains(string(out), "register instance: Post \"https://license.loft.sh/register\": dial tcp") {
+ return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up. Looks like you're trying to install %[1]s into an air-gapped environment, please reach out to our support at https://loft.sh/ for an offline license", product.DisplayName(), string(out))
+ }
+
+ return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up: %[3]s (%[4]s). 
Please reach out to our support at https://loft.sh/", product.DisplayName(), string(out), message, reason) + } else if containerStatus.State.Waiting != nil && time.Now().After(now.Add(time.Second*10)) { + if containerStatus.State.Waiting.Message != "" { + log.Infof("Please keep waiting, %s container is still starting up: %s (%s)", product.DisplayName(), containerStatus.State.Waiting.Message, containerStatus.State.Waiting.Reason) + } else if containerStatus.State.Waiting.Reason != "" { + log.Infof("Please keep waiting, %s container is still starting up: %s", product.DisplayName(), containerStatus.State.Waiting.Reason) + } else { + log.Infof("Please keep waiting, %s container is still starting up...", product.DisplayName()) + } + + now = time.Now() + } + + return false, nil + } + + pod = loftPod + return found, nil + }) + if err != nil { + return nil, err + } + + return pod, nil +} + +func StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) { + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Starting port-forwarding to the %s pod", product.DisplayName()) + execRequest := client.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). + SubResource("portforward") + + t, upgrader, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: t}, "POST", execRequest.URL()) + errChan := make(chan error) + readyChan := make(chan struct{}) + stopChan := make(chan struct{}) + targetPort := getPortForwardingTargetPort(pod) + forwarder, err := portforward.New(dialer, []string{localPort + ":" + strconv.Itoa(targetPort)}, stopChan, readyChan, errChan, io.Discard, io.Discard) + if err != nil { + return nil, err + } + + go func() { + err := forwarder.ForwardPorts(ctx) + if err != nil { + errChan <- err + } + }() + + // wait till ready + select { + case err = <-errChan: + return nil, err + case <-readyChan: + case <-stopChan: + return nil, fmt.Errorf("stopped before ready") + } + + // start watcher + go func() { + for { + select { + case <-stopChan: + return + case err = <-errChan: + log.Infof("error during port forwarder: %v", err) + close(stopChan) + return + } + } + }() + + return stopChan, nil +} + +func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + loftNamespace, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + }, metav1.CreateOptions{}) + if err != nil { + return "", err + } + + return string(loftNamespace.UID), nil + } + + return "", err + } + + return string(loftNamespace.UID), nil +} + +type version struct { + Version string `json:"version"` +} + +func IsLoftReachable(ctx context.Context, host string) (bool, error) { + // wait until loft is reachable at the given url + client := &http.Client{ + Transport: httputil.InsecureTransport(), + } + url := "https://" + host + "/version" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return false, fmt.Errorf("error creating request with context: %w", err) + } + resp, err := client.Do(req) + if err == nil && resp.StatusCode == http.StatusOK { + out, err := io.ReadAll(resp.Body) + if err != 
nil { + return false, nil + } + + v := &version{} + err = json.Unmarshal(out, v) + if err != nil { + return false, fmt.Errorf("error decoding response from %s: %w. Try running '%s --reset'", url, err, product.StartCmd()) + } else if v.Version == "" { + return false, fmt.Errorf("unexpected response from %s: %s. Try running '%s --reset'", url, string(out), product.StartCmd()) + } + + return true, nil + } + + return false, nil +} + +func IsLocalCluster(host string, log log.Logger) bool { + url, err := url.Parse(host) + if err != nil { + log.Warnf("Couldn't parse kube context host url: %v", err) + return false + } + + hostname := url.Hostname() + ip := net.ParseIP(hostname) + if ip != nil { + if IsPrivateIP(ip) { + return true + } + } + + if hostname == "localhost" || strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".localhost") { + return true + } + + return false +} + +var privateIPBlocks []*net.IPNet + +func init() { + for _, cidr := range []string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "::1/128", // IPv6 loopback + "fe80::/10", // IPv6 link-local + "fc00::/7", // IPv6 unique local addr + } { + _, block, _ := net.ParseCIDR(cidr) + privateIPBlocks = append(privateIPBlocks, block) + } +} + +// IsPrivateIP checks if a given ip is private +func IsPrivateIP(ip net.IP) bool { + for _, block := range privateIPBlocks { + if block.Contains(ip) { + return true + } + } + + return false +} + +func EnterHostNameQuestion(log log.Logger) (string, error) { + return log.Question(&survey.QuestionOptions{ + Question: fmt.Sprintf("Enter a hostname for your %s instance (e.g. loft.my-domain.tld): \n ", product.DisplayName()), + ValidationFunc: func(answer string) error { + u, err := url.Parse("https://" + answer) + if err != nil || u.Path != "" || u.Port() != "" || len(strings.Split(answer, ".")) < 2 { + return fmt.Errorf("please enter a valid hostname without protocol (https://), without path and without port, e.g. 
loft.my-domain.tld") + } + return nil + }, + }) +} + +func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { + _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, fmt.Errorf("error accessing kubernetes cluster: %w", err) + } + + return true, nil +} + +func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { + log.Infof("Uninstalling %s...", product.DisplayName()) + releaseName := defaultReleaseName + deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } else if deploy != nil && deploy.Labels != nil && deploy.Labels["release"] != "" { + releaseName = deploy.Labels["release"] + } + + args := []string{ + "uninstall", + releaseName, + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + log.Infof("Executing command: helm %s", strings.Join(args, " ")) + output, err := exec.Command("helm", args...).CombinedOutput() + if err != nil { + log.Errorf("error during helm command: %s (%v)", string(output), err) + } + + // we also cleanup the validating webhook configuration and apiservice + apiRegistrationClient, err := clientset.NewForConfig(restConfig) + if err != nil { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.management.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = deleteUser(ctx, restConfig, "admin") + if err != nil { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), "loft-user-secret-admin", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), LoftRouterDomainSecret, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + // we also cleanup the validating webhook configuration and apiservice + err = kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "loft-agent", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.tenancy.kiosk.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.cluster.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-agent-controller", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-applied-defaults", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + log.WriteString(logrus.InfoLevel, "\n") + log.Done(product.Replace("Successfully uninstalled Loft")) + log.WriteString(logrus.InfoLevel, "\n") + + return nil +} + +func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return err + } + + user, err := 
loftClient.StorageV1().Users().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil + } else if len(user.Finalizers) > 0 { + user.Finalizers = nil + _, err = loftClient.StorageV1().Users().Update(ctx, user, metav1.UpdateOptions{}) + if err != nil { + if kerrors.IsConflict(err) { + return deleteUser(ctx, restConfig, name) + } + + return err + } + } + + err = loftClient.StorageV1().Users().Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + return nil +} + +func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error { + // first create an ingress controller + const ( + YesOption = "Yes" + NoOption = "No, I already have an ingress controller installed." + ) + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Ingress controller required. Should the nginx-ingress controller be installed?", + DefaultValue: YesOption, + Options: []string{ + YesOption, + NoOption, + }, + }) + if err != nil { + return err + } + + if answer == YesOption { + args := []string{ + "install", + "ingress-nginx", + "ingress-nginx", + "--repository-config=''", + "--repo", + "https://kubernetes.github.io/ingress-nginx", + "--kube-context", + kubeContext, + "--namespace", + "ingress-nginx", + "--create-namespace", + "--set-string", + "controller.config.hsts=false", + "--wait", + } + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for ingress controller deployment, this can take several minutes...") + helmCmd := exec.Command("helm", args...) + output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + list, err := kubeClient.CoreV1().Secrets("ingress-nginx").List(ctx, metav1.ListOptions{ + LabelSelector: "name=ingress-nginx,owner=helm,status=deployed", + }) + if err != nil { + return err + } + + if len(list.Items) == 1 { + secret := list.Items[0] + originalSecret := secret.DeepCopy() + secret.Labels["loft.sh/app"] = "true" + if secret.Annotations == nil { + secret.Annotations = map[string]string{} + } + + secret.Annotations["loft.sh/url"] = "https://kubernetes.github.io/ingress-nginx" + originalJSON, err := json.Marshal(originalSecret) + if err != nil { + return err + } + modifiedJSON, err := json.Marshal(secret) + if err != nil { + return err + } + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return err + } + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, types.MergePatchType, data, metav1.PatchOptions{}) + if err != nil { + return err + } + } + + log.Done("Successfully installed ingress-nginx to your kubernetes cluster!") + } + + return nil +} + +func UpgradeLoft(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, log log.Logger) error { + // now we install loft + args := []string{ + "upgrade", + defaultReleaseName, + chartName, + "--install", + "--reuse-values", + "--create-namespace", + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for helm command, this can take up to several minutes...") + helmCmd := exec.Command("helm", args...) 
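+ // when a remote chart repo is used, helm must not run inside a directory
+ // that contains a folder named like the chart, otherwise it would resolve
+ // the local folder instead of the repo chart (see getHelmWorkdir)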
+ if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + log.Donef("%s has been deployed to your cluster!", product.DisplayName()) + return nil +} + +func GetLoftManifests(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, _ log.Logger) (string, error) { + args := []string{ + "template", + defaultReleaseName, + chartName, + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + helmCmd := exec.Command("helm", args...) + if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return "", err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + return string(output), nil +} + +// Return the directory where the `helm` commands should be executed or error if none can be found/created +// Uses current workdir by default unless it contains a folder with the chart name +func getHelmWorkdir(chartName string) (string, error) { + // If chartName folder exists, check temp dir next + if _, err := os.Stat(chartName); err == nil { + tempDir := os.TempDir() + + // If tempDir/chartName folder exists, create temp folder + if _, err := os.Stat(path.Join(tempDir, chartName)); err == nil { + tempDir, err = os.MkdirTemp(tempDir, chartName) + if err != nil { + return "", errors.New("problematic directory `" + chartName + "` found: please execute command in a different folder") + } + } + + // Use tempDir + return tempDir, nil + } + + // Use current workdir + return "", nil +} + +// Makes sure that admin user and password secret exists +// Returns (true, nil) if everything is correct but password is different from parameter `password` +func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return false, err + } + + admin, err := loftClient.StorageV1().Users().Get(ctx, "admin", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if admin == nil { + admin, err = loftClient.StorageV1().Users().Create(ctx, &storagev1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin", + }, + Spec: storagev1.UserSpec{ + Username: "admin", + Email: "test@domain.tld", + Subject: "admin", + Groups: []string{"system:masters"}, + PasswordRef: &storagev1.SecretRef{ + SecretName: "loft-user-secret-admin", + SecretNamespace: "loft", + Key: "password", + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } + } else if admin.Spec.PasswordRef == nil || admin.Spec.PasswordRef.SecretName == "" || admin.Spec.PasswordRef.SecretNamespace == "" { + return false, nil + } + + key := admin.Spec.PasswordRef.Key + if key == "" { + key = "password" + } + + passwordHash := fmt.Sprintf("%x", sha256.Sum256([]byte(password))) + + secret, err := kubeClient.CoreV1().Secrets(admin.Spec.PasswordRef.SecretNamespace).Get(ctx, admin.Spec.PasswordRef.SecretName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if err 
== nil { + existingPasswordHash, keyExists := secret.Data[key] + if keyExists { + return (string(existingPasswordHash) != passwordHash), nil + } + + secret.Data[key] = []byte(passwordHash) + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + if err != nil { + return false, errors.Wrap(err, "update admin password secret") + } + return false, nil + } + + // create the password secret if it was not found, this can happen if you delete the loft namespace without deleting the admin user + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: admin.Spec.PasswordRef.SecretName, + Namespace: admin.Spec.PasswordRef.SecretNamespace, + }, + Data: map[string][]byte{ + key: []byte(passwordHash), + }, + } + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + if err != nil { + return false, errors.Wrap(err, "create admin password secret") + } + + log.Info("Successfully recreated admin password secret") + return false, nil +} + +func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool { + _, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + _, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + return kerrors.IsNotFound(err) + } + + return kerrors.IsNotFound(err) +} + +func getPortForwardingTargetPort(pod *corev1.Pod) int { + for _, container := range pod.Spec.Containers { + if container.Name == "manager" { + for _, port := range container.Ports { + if port.Name == "https" { + return int(port.ContainerPort) + } + } + } + } + + return 10443 +} diff --git a/pkg/platform/defaults/defaults.go b/pkg/platform/defaults/defaults.go new file mode 100644 index 000000000..7b7fdd945 --- /dev/null +++ b/pkg/platform/defaults/defaults.go @@ -0,0 +1,111 @@ +package platformdefaults + +import ( + "encoding/json" + "os" + "path/filepath" + + "github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/pkg/errors" +) + +const ( + KeyProject = "project" +) + +var ( + ConfigFile = "defaults.json" + ConfigFolder = client.CacheFolder + + DefaultKeys = []string{KeyProject} +) + +// Defaults holds the default values +type Defaults struct { + folderPath string + fileName string + fullPath string + + values map[string]string +} + +// NewFromPath creates a new defaults instance from the given path +func NewFromPath(folderPath string, fileName string) (*Defaults, error) { + fullPath := filepath.Join(folderPath, fileName) + defaults := &Defaults{folderPath, fileName, fullPath, make(map[string]string)} + + if err := defaults.ensureConfigFile(); err != nil { + return defaults, errors.Wrap(err, "no config file") + } + + contents, err := os.ReadFile(fullPath) + if err != nil { + return defaults, errors.Wrap(err, "read config file") + } + if len(contents) == 0 { + return defaults, nil + } + if err = json.Unmarshal(contents, &defaults.values); err != nil { + return defaults, errors.Wrap(err, "invalid json") + } + + return defaults, nil +} + +// Set sets the given key to the given value and persists the defaults on disk +func (d *Defaults) Set(key string, value string) error { + if !IsSupportedKey(key) { + return errors.Errorf("key %s is not supported", key) + } + + d.values[key] = value + json, err := json.Marshal(d.values) + if err != nil { + return errors.Wrap(err, "invalid json") + } + if err = os.WriteFile(d.fullPath, json, 
os.ModePerm); err != nil { + return errors.Wrap(err, "write config file") + } + + return nil +} + +// Get returns the value for the given key +func (d *Defaults) Get(key string, fallback string) (string, error) { + if !IsSupportedKey(key) { + return fallback, errors.Errorf("key %s is not supported", key) + } + + return d.values[key], nil +} + +// IsSupportedKey returns true if the given key is supported +func IsSupportedKey(key string) bool { + for _, k := range DefaultKeys { + if k == key { + return true + } + } + + return false +} + +func (d *Defaults) ensureConfigFile() error { + _, err := os.Stat(d.fullPath) + // file exists + if err == nil { + return nil + } + + if os.IsNotExist(err) { + if err := os.MkdirAll(d.folderPath, os.ModePerm); err != nil { + return errors.Wrap(err, "create cache folder") + } + if _, err := os.Create(d.fullPath); err != nil { + return errors.Wrap(err, "create defaults file") + } + + return nil + } + return err +} diff --git a/pkg/platform/kube/client.go b/pkg/platform/kube/client.go new file mode 100644 index 000000000..21699183b --- /dev/null +++ b/pkg/platform/kube/client.go @@ -0,0 +1,54 @@ +package kube + +import ( + agentloftclient "github.com/loft-sh/agentapi/v4/pkg/client/loft/clientset_generated/clientset" + loftclient "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + + "github.com/pkg/errors" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type Interface interface { + kubernetes.Interface + Loft() loftclient.Interface + Agent() agentloftclient.Interface +} + +func NewForConfig(c *rest.Config) (Interface, error) { + kubeClient, err := kubernetes.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kube client") + } + + loftClient, err := loftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create loft client") + } + + agentLoftClient, err := agentloftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kiosk client") + } + + return &client{ + Interface: kubeClient, + loftClient: loftClient, + agentLoftClient: agentLoftClient, + }, nil +} + +type client struct { + kubernetes.Interface + loftClient loftclient.Interface + agentLoftClient agentloftclient.Interface +} + +func (c *client) Loft() loftclient.Interface { + return c.loftClient +} + +func (c *client) Agent() agentloftclient.Interface { + return c.agentLoftClient +} diff --git a/pkg/platform/kubeconfig/kubeconfig.go b/pkg/platform/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..602ecf63e --- /dev/null +++ b/pkg/platform/kubeconfig/kubeconfig.go @@ -0,0 +1,266 @@ +package kubeconfig + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +type ContextOptions struct { + Name string + Server string + CaData []byte + ConfigPath string + InsecureSkipTLSVerify bool + DirectClusterEndpointEnabled bool + VirtualClusterAccessPointEnabled bool + + Token string + ClientKeyData []byte + ClientCertificateData []byte + + CurrentNamespace string + SetActive bool +} + +func SpaceInstanceContextName(projectName, spaceInstanceName string) string { + return "loft_" + spaceInstanceName + "_" + projectName +} + +func VirtualClusterInstanceContextName(projectName, virtualClusterInstance string) string { + return "loft-vcluster_" + virtualClusterInstance + "_" + projectName +} + +func 
virtualClusterInstanceProjectAndNameFromContextName(contextName string) (string, string) { + return strings.Split(contextName, "_")[2], strings.Split(contextName, "_")[1] +} + +func SpaceContextName(clusterName, namespaceName string) string { + contextName := "loft_" + if namespaceName != "" { + contextName += namespaceName + "_" + } + + contextName += clusterName + return contextName +} + +func VirtualClusterContextName(clusterName, namespaceName, virtualClusterName string) string { + return "loft-vcluster_" + virtualClusterName + "_" + namespaceName + "_" + clusterName +} + +func ManagementContextName() string { + return "loft-management" +} + +func ParseContext(contextName string) (isLoftContext bool, cluster string, namespace string, vCluster string) { + splitted := strings.Split(contextName, "_") + if len(splitted) == 0 || (splitted[0] != "loft" && splitted[0] != "loft-vcluster") { + return false, "", "", "" + } + + // cluster or space context + if splitted[0] == "loft" { + if len(splitted) > 3 || len(splitted) == 1 { + return false, "", "", "" + } else if len(splitted) == 2 { + return true, splitted[1], "", "" + } + + return true, splitted[2], splitted[1], "" + } + + // vCluster context + if len(splitted) != 4 { + return false, "", "", "" + } + + return true, splitted[3], splitted[2], splitted[1] +} + +func CurrentContext() (string, error) { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return "", err + } + + return config.CurrentContext, nil +} + +// DeleteContext deletes the context with the given name from the kube config +func DeleteContext(contextName string) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + delete(config.Contexts, contextName) + delete(config.Clusters, contextName) + delete(config.AuthInfos, contextName) + + if config.CurrentContext == contextName { + config.CurrentContext = "" + for name := range config.Contexts { + config.CurrentContext = name + break + } + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func updateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, setActive bool) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + + config.Contexts[contextName] = context + if setActive { + config.CurrentContext = contextName + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func printKubeConfigTo(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, writer io.Writer) error { + config := api.NewConfig() + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + 
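+ // register the freshly built context and select it as current, so the
+ // config serialized below is immediately usable as a standalone kubeconfig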
+ config.Contexts[contextName] = context
+ config.CurrentContext = contextName
+
+ // set kind & version
+ config.APIVersion = "v1"
+ config.Kind = "Config"
+
+ out, err := clientcmd.Write(*config)
+ if err != nil {
+ return err
+ }
+
+ _, err = writer.Write(out)
+ return err
+}
+
+// UpdateKubeConfig updates the kube config and adds the virtual cluster context
+func UpdateKubeConfig(options ContextOptions) error {
+ contextName, cluster, authInfo, err := createContext(options)
+ if err != nil {
+ return err
+ }
+
+ // we don't want to set the space name here as the default namespace in the virtual cluster, because it might not exist
+ return updateKubeConfig(contextName, cluster, authInfo, options.CurrentNamespace, options.SetActive)
+}
+
+// PrintKubeConfigTo prints the given config to the writer
+func PrintKubeConfigTo(options ContextOptions, writer io.Writer) error {
+ contextName, cluster, authInfo, err := createContext(options)
+ if err != nil {
+ return err
+ }
+
+ // we don't want to set the space name here as the default namespace in the virtual cluster, because it might not exist
+ return printKubeConfigTo(contextName, cluster, authInfo, options.CurrentNamespace, writer)
+}
+
+// PrintTokenKubeConfig writes the kube config to os.Stdout
+func PrintTokenKubeConfig(restConfig *rest.Config, token string) error {
+ contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+ return printKubeConfigTo(contextName, cluster, authInfo, "", os.Stdout)
+}
+
+// WriteTokenKubeConfig writes the kube config to the io.Writer
+func WriteTokenKubeConfig(restConfig *rest.Config, token string, w io.Writer) error {
+ contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+ return printKubeConfigTo(contextName, cluster, authInfo, "", w)
+}
+
+func createTokenContext(restConfig *rest.Config, token string) (string, *api.Cluster, *api.AuthInfo) {
+ contextName := "default"
+
+ cluster := api.NewCluster()
+ cluster.Server = restConfig.Host
+ cluster.InsecureSkipTLSVerify = restConfig.Insecure
+ cluster.CertificateAuthority = restConfig.CAFile
+ cluster.CertificateAuthorityData = restConfig.CAData
+ cluster.TLSServerName = restConfig.ServerName
+
+ authInfo := api.NewAuthInfo()
+ authInfo.Token = token
+
+ return contextName, cluster, authInfo
+}
+
+func createContext(options ContextOptions) (string, *api.Cluster, *api.AuthInfo, error) {
+ contextName := options.Name
+ cluster := api.NewCluster()
+ cluster.Server = options.Server
+ cluster.CertificateAuthorityData = options.CaData
+ cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+
+ authInfo := api.NewAuthInfo()
+ if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil {
+ authInfo.Token = options.Token
+ authInfo.ClientKeyData = options.ClientKeyData
+ authInfo.ClientCertificateData = options.ClientCertificateData
+ } else {
+ command, err := os.Executable()
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ absConfigPath, err := filepath.Abs(options.ConfigPath)
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ if options.VirtualClusterAccessPointEnabled {
+ projectName, virtualClusterName := virtualClusterInstanceProjectAndNameFromContextName(contextName)
+ authInfo.Exec = &api.ExecConfig{
+ APIVersion: v1beta1.SchemeGroupVersion.String(),
+ Command: command,
+ Args: []string{"token", "--silent", "--project", projectName, "--virtual-cluster", virtualClusterName},
+ }
+ } else {
+ authInfo.Exec = &api.ExecConfig{
+ APIVersion: 
v1beta1.SchemeGroupVersion.String(), + Command: command, + Args: []string{"token", "--silent", "--config", absConfigPath}, + } + if options.DirectClusterEndpointEnabled { + authInfo.Exec.Args = append(authInfo.Exec.Args, "--direct-cluster-endpoint") + } + } + } + + return contextName, cluster, authInfo, nil +} diff --git a/pkg/platform/loftclient/client.go b/pkg/platform/loftclient/client.go new file mode 100644 index 000000000..d1321d1df --- /dev/null +++ b/pkg/platform/loftclient/client.go @@ -0,0 +1,627 @@ +package client + +import ( + "context" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/blang/semver" + "github.com/loft-sh/loftctl/v4/pkg/client/naming" + "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" + + "github.com/loft-sh/api/v4/pkg/auth" + "github.com/loft-sh/api/v4/pkg/product" + + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/loft-sh/loftctl/v4/pkg/constants" + "github.com/loft-sh/loftctl/v4/pkg/kube" + "github.com/loft-sh/loftctl/v4/pkg/upgrade" + "github.com/loft-sh/log" + "github.com/mitchellh/go-homedir" + perrors "github.com/pkg/errors" + "github.com/skratchdot/open-golang/open" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var CacheFolder = ".loft" + +// DefaultCacheConfig is the path to the config +var DefaultCacheConfig = "config.json" + +const ( + VersionPath = "%s/version" + LoginPath = "%s/login?cli=true" + RedirectPath = "%s/spaces" + AccessKeyPath = "%s/profile/access-keys" + RefreshToken = time.Minute * 30 +) + +func init() { + hd, _ := homedir.Dir() + if folder, ok := os.LookupEnv(constants.LoftCacheFolderEnv); ok { + CacheFolder = filepath.Join(hd, folder) + } else { + CacheFolder = filepath.Join(hd, CacheFolder) + } + DefaultCacheConfig = filepath.Join(CacheFolder, DefaultCacheConfig) +} + +type Client interface { + Management() (kube.Interface, error) + ManagementConfig() (*rest.Config, error) + + SpaceInstance(project, name string) (kube.Interface, error) + SpaceInstanceConfig(project, name string) (*rest.Config, error) + + VirtualClusterInstance(project, name string) (kube.Interface, error) + VirtualClusterInstanceConfig(project, name string) (*rest.Config, error) + + Cluster(cluster string) (kube.Interface, error) + ClusterConfig(cluster string) (*rest.Config, error) + + VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error) + VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error) + + Login(host string, insecure bool, log log.Logger) error + LoginWithAccessKey(host, accessKey string, insecure bool) error + LoginRaw(host, accessKey string, insecure bool) error + + Logout(ctx context.Context) error + + Version() (*auth.Version, error) + Config() *Config + DirectClusterEndpointToken(forceRefresh bool) (string, error) + VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error) + Save() error +} + +func NewClient() Client { + return &client{ + config: &Config{}, + } +} + +func NewClientFromPath(path string) (Client, error) { + c := &client{ + configPath: path, + } + + err := c.initConfig() + if err != nil { + return nil, err + } + + return c, nil +} + +type client struct { + config *Config + configPath 
string + configOnce sync.Once +} + +// Logout implements Client. +func (c *client) Logout(ctx context.Context) error { + managementClient, err := c.Management() + if err != nil { + return fmt.Errorf("create management client: %w", err) + } + + self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("get self: %w", err) + } + + if self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin { + err = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(ctx, self.Status.AccessKey, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("delete access key: %w", err) + } + } + + return nil +} + +func (c *client) initConfig() error { + var retErr error + c.configOnce.Do(func() { + // load the config or create new one if not found + content, err := os.ReadFile(c.configPath) + if err != nil { + if os.IsNotExist(err) { + c.config = NewConfig() + return + } + + retErr = err + return + } + + config := &Config{ + VirtualClusterAccessPointCertificates: make(map[string]VirtualClusterCertificatesEntry), + } + err = json.Unmarshal(content, config) + if err != nil { + retErr = err + return + } + + c.config = config + }) + + return retErr +} + +func (c *client) VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error) { + if c.config == nil { + return "", "", perrors.New("no config loaded") + } + + contextName := kubeconfig.VirtualClusterInstanceContextName(project, virtualCluster) + + // see if we have stored cert data for this vci + now := metav1.Now() + cachedVirtualClusterAccessPointCertificate, ok := c.config.VirtualClusterAccessPointCertificates[contextName] + if !forceRefresh && ok && cachedVirtualClusterAccessPointCertificate.LastRequested.Add(RefreshToken).After(now.Time) && cachedVirtualClusterAccessPointCertificate.ExpirationTime.After(now.Time) { + return cachedVirtualClusterAccessPointCertificate.CertificateData, cachedVirtualClusterAccessPointCertificate.KeyData, nil + } + + // refresh token + managementClient, err := c.Management() + if err != nil { + return "", "", err + } + + kubeConfigResponse, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(project)).GetKubeConfig( + context.Background(), + virtualCluster, + &managementv1.VirtualClusterInstanceKubeConfig{ + Spec: managementv1.VirtualClusterInstanceKubeConfigSpec{ + CertificateTTL: ptr.To[int32](86_400), + }, + }, + metav1.CreateOptions{}, + ) + if err != nil { + return "", "", perrors.Wrap(err, "fetch certificate data") + } + + certificateData, keyData, err := getCertificateAndKeyDataFromKubeConfig(kubeConfigResponse.Status.KubeConfig) + if err != nil { + return "", "", err + } + + if c.config.VirtualClusterAccessPointCertificates == nil { + c.config.VirtualClusterAccessPointCertificates = make(map[string]VirtualClusterCertificatesEntry) + } + c.config.VirtualClusterAccessPointCertificates[contextName] = VirtualClusterCertificatesEntry{ + CertificateData: certificateData, + KeyData: keyData, + LastRequested: now, + ExpirationTime: now.Add(86_400 * time.Second), + } + + err = c.Save() + if err != nil { + return "", "", perrors.Wrap(err, "save config") + } + + return certificateData, keyData, nil +} + +func getCertificateAndKeyDataFromKubeConfig(config string) (string, string, error) { + clientCfg, err := clientcmd.NewClientConfigFromBytes([]byte(config)) + if err != nil { + return "", "", err + } 
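+ // note: the raw config below is expected to contain a single "vcluster"
+ // auth info entry carrying the platform-issued client certificate and key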
+ + apiCfg, err := clientCfg.RawConfig() + if err != nil { + return "", "", err + } + + return string(apiCfg.AuthInfos["vcluster"].ClientCertificateData), string(apiCfg.AuthInfos["vcluster"].ClientKeyData), nil +} + +func (c *client) DirectClusterEndpointToken(forceRefresh bool) (string, error) { + if c.config == nil { + return "", perrors.New("no config loaded") + } + + // check if we can use existing token + now := metav1.Now() + if !forceRefresh && c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(RefreshToken).After(now.Time) { + return c.config.DirectClusterEndpointToken, nil + } + + // refresh token + managementClient, err := c.Management() + if err != nil { + return "", err + } + + clusterGatewayToken, err := managementClient.Loft().ManagementV1().DirectClusterEndpointTokens().Create(context.Background(), &managementv1.DirectClusterEndpointToken{}, metav1.CreateOptions{}) + if err != nil { + if c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(time.Hour*24).After(now.Time) { + return c.config.DirectClusterEndpointToken, nil + } + + return "", err + } else if clusterGatewayToken.Status.Token == "" { + return "", perrors.New("retrieved an empty token") + } + + c.config.DirectClusterEndpointToken = clusterGatewayToken.Status.Token + c.config.DirectClusterEndpointTokenRequested = &now + err = c.Save() + if err != nil { + return "", perrors.Wrap(err, "save config") + } + + return c.config.DirectClusterEndpointToken, nil +} + +func (c *client) Save() error { + if c.configPath == "" { + return nil + } + if c.config == nil { + return perrors.New("no config to write") + } + if c.config.Kind == "" { + c.config.Kind = "Config" + } + if c.config.APIVersion == "" { + c.config.APIVersion = "storage.loft.sh/v1" + } + + err := os.MkdirAll(filepath.Dir(c.configPath), 0o755) + if err != nil { + return err + } + + out, err := json.Marshal(c.config) + if err != nil { + return err + } + + return os.WriteFile(c.configPath, out, 0o660) +} + +func (c *client) ManagementConfig() (*rest.Config, error) { + return c.restConfig("/kubernetes/management") +} + +func (c *client) Management() (kube.Interface, error) { + restConfig, err := c.ManagementConfig() + if err != nil { + return nil, err + } + + return kube.NewForConfig(restConfig) +} + +func (c *client) SpaceInstanceConfig(project, name string) (*rest.Config, error) { + return c.restConfig("/kubernetes/project/" + project + "/space/" + name) +} + +func (c *client) SpaceInstance(project, name string) (kube.Interface, error) { + restConfig, err := c.SpaceInstanceConfig(project, name) + if err != nil { + return nil, err + } + + return kube.NewForConfig(restConfig) +} + +func (c *client) VirtualClusterInstanceConfig(project, name string) (*rest.Config, error) { + return c.restConfig("/kubernetes/project/" + project + "/virtualcluster/" + name) +} + +func (c *client) VirtualClusterInstance(project, name string) (kube.Interface, error) { + restConfig, err := c.VirtualClusterInstanceConfig(project, name) + if err != nil { + return nil, err + } + + return kube.NewForConfig(restConfig) +} + +func (c *client) ClusterConfig(cluster string) (*rest.Config, error) { + return c.restConfig("/kubernetes/cluster/" + cluster) +} + +func (c *client) Cluster(cluster string) (kube.Interface, error) { + restConfig, err := c.ClusterConfig(cluster) + if err != nil { + return nil, err + } 
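+ // kube.NewForConfig wraps the rest config in a client set that bundles
+ // the kubernetes, loft and agent clientsets behind a single interface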
+ + return kube.NewForConfig(restConfig) +} + +func (c *client) VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error) { + return c.restConfig("/kubernetes/virtualcluster/" + cluster + "/" + namespace + "/" + virtualCluster) +} + +func (c *client) VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error) { + restConfig, err := c.VirtualClusterConfig(cluster, namespace, virtualCluster) + if err != nil { + return nil, err + } + + return kube.NewForConfig(restConfig) +} + +func (c *client) Config() *Config { + return c.config +} + +type keyStruct struct { + Key string +} + +func verifyHost(host string) error { + if !strings.HasPrefix(host, "https") { + return fmt.Errorf("cannot log into a non https loft instance '%s', please make sure you have TLS enabled", host) + } + + return nil +} + +func (c *client) Version() (*auth.Version, error) { + restConfig, err := c.restConfig("") + if err != nil { + return nil, err + } + + restClient, err := kube.NewForConfig(restConfig) + if err != nil { + return nil, err + } + + raw, err := restClient.CoreV1().RESTClient().Get().RequestURI("/version").DoRaw(context.Background()) + if err != nil { + return nil, perrors.New(fmt.Sprintf("%s\n\nYou may need to login again via `%s login %s --insecure` to allow self-signed certificates\n", err.Error(), os.Args[0], restConfig.Host)) + } + + version := &auth.Version{} + err = json.Unmarshal(raw, version) + if err != nil { + return nil, perrors.Wrap(err, "parse version response") + } + + return version, nil +} + +func (c *client) Login(host string, insecure bool, log log.Logger) error { + var ( + loginURL = fmt.Sprintf(LoginPath, host) + key keyStruct + keyChannel = make(chan keyStruct) + ) + + err := verifyHost(host) + if err != nil { + return err + } + + server := startServer(fmt.Sprintf(RedirectPath, host), keyChannel, log) + err = open.Run(fmt.Sprintf(LoginPath, host)) + if err != nil { + return fmt.Errorf("couldn't open the login page in a browser: %w. Please use the --access-key flag for the login command. 
You can generate an access key here: %s", err, fmt.Sprintf(AccessKeyPath, host))
+ }
+ log.Infof("If the browser does not open automatically, please navigate to %s", loginURL)
+ msg := "If you have problems logging in, please navigate to %s/profile/access-keys, click on 'Create Access Key' and then login via '%s %s --access-key ACCESS_KEY"
+ if insecure {
+ msg += " --insecure"
+ }
+ msg += "'"
+ log.Infof(msg, host, product.LoginCmd(), host)
+ log.Infof("Logging into %s...", product.DisplayName())
+
+ key = <-keyChannel
+
+ go func() {
+ err = server.Shutdown(context.Background())
+ if err != nil {
+ log.Debugf("Error shutting down server: %v", err)
+ }
+ }()
+
+ close(keyChannel)
+ return c.LoginWithAccessKey(host, key.Key, insecure)
+}
+
+func (c *client) LoginRaw(host, accessKey string, insecure bool) error {
+ if c.config.Host == host && c.config.AccessKey == accessKey {
+ return nil
+ }
+
+ c.config.Host = host
+ c.config.Insecure = insecure
+ c.config.AccessKey = accessKey
+ c.config.DirectClusterEndpointToken = ""
+ c.config.DirectClusterEndpointTokenRequested = nil
+ return c.Save()
+}
+
+func (c *client) LoginWithAccessKey(host, accessKey string, insecure bool) error {
+ err := verifyHost(host)
+ if err != nil {
+ return err
+ }
+ if c.config.Host == host && c.config.AccessKey == accessKey {
+ return nil
+ }
+
+ // delete the old access key if we were logged in before
+ if c.config.AccessKey != "" {
+ managementClient, err := c.Management()
+ if err == nil {
+ self, err := managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{})
+ if err == nil && self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin {
+ _ = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(context.TODO(), self.Status.AccessKey, metav1.DeleteOptions{})
+ }
+ }
+ }
+
+ c.config.Host = host
+ c.config.Insecure = insecure
+ c.config.AccessKey = accessKey
+ c.config.DirectClusterEndpointToken = ""
+ c.config.DirectClusterEndpointTokenRequested = nil
+
+ // verify version
+ err = VerifyVersion(c)
+ if err != nil {
+ return err
+ }
+
+ // verify the connection works
+ managementClient, err := c.Management()
+ if err != nil {
+ return perrors.Wrap(err, "create management client")
+ }
+
+ // try to get self
+ _, err = managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{})
+ if err != nil {
+ var urlError *url.Error
+ if errors.As(err, &urlError) {
+ var err x509.UnknownAuthorityError
+ if errors.As(urlError.Err, &err) {
+ return fmt.Errorf("unsafe login endpoint '%s'; if you wish to log into an insecure loft endpoint, run with the '--insecure' flag", c.config.Host)
+ }
+ }
+
+ return perrors.Errorf("error logging in: %v", err)
+ }
+
+ return c.Save()
+}
+
+// VerifyVersion checks if the Loft version is compatible with this CLI version
+func VerifyVersion(baseClient Client) error {
+ v, err := baseClient.Version()
+ if err != nil {
+ return err
+ } else if v.Version == "v0.0.0" {
+ return nil
+ }
+
+ backendMajor, err := strconv.Atoi(v.Major)
+ if err != nil {
+ return perrors.Wrap(err, "parse major version string")
+ }
+
+ cliVersionStr := upgrade.GetVersion()
+ if cliVersionStr == "" {
+ return nil
+ }
+
+ cliVersion, err := semver.Parse(cliVersionStr)
+ if err != nil {
+ return err
+ }
+
+ if int(cliVersion.Major) > backendMajor {
+ return fmt.Errorf("unsupported %[1]s version %[2]s. 
Please downgrade your CLI to below v%[3]d.0.0 to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, cliVersion.Major, backendMajor) + } else if int(cliVersion.Major) < backendMajor { + return fmt.Errorf("unsupported %[1]s version %[2]s. Please upgrade your CLI to v%[3]d.0.0 or above to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, backendMajor, cliVersion.Major) + } + + return nil +} + +func (c *client) restConfig(hostSuffix string) (*rest.Config, error) { + if c.config == nil { + return nil, perrors.New("no config loaded") + } else if c.config.Host == "" || c.config.AccessKey == "" { + return nil, perrors.New(fmt.Sprintf("not logged in, please make sure you have run '%s [%s]'", product.LoginCmd(), product.Url())) + } + + // build a rest config + config, err := GetRestConfig(c.config.Host+hostSuffix, c.config.AccessKey, c.config.Insecure) + if err != nil { + return nil, err + } + + return config, err +} + +func GetKubeConfig(host, token, namespace string, insecure bool) clientcmd.ClientConfig { + contextName := "local" + kubeConfig := clientcmdapi.NewConfig() + kubeConfig.Contexts = map[string]*clientcmdapi.Context{ + contextName: { + Cluster: contextName, + AuthInfo: contextName, + Namespace: namespace, + }, + } + kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ + contextName: { + Server: host, + InsecureSkipTLSVerify: insecure, + }, + } + kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ + contextName: { + Token: token, + }, + } + kubeConfig.CurrentContext = contextName + return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}) +} + +func GetRestConfig(host, token string, insecure bool) (*rest.Config, error) { + config, err := GetKubeConfig(host, token, "", insecure).ClientConfig() + if err != nil { + return nil, err + } + config.UserAgent = constants.LoftctlUserAgentPrefix + upgrade.GetVersion() + + return config, nil +} + +func startServer(redirectURI string, keyChannel chan keyStruct, log log.Logger) *http.Server { + srv := &http.Server{Addr: ":25843"} + + http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) { + keys, ok := r.URL.Query()["key"] + if !ok || len(keys[0]) == 0 { + log.Warn("Login: the key used to login is not valid") + return + } + + keyChannel <- keyStruct{ + Key: keys[0], + } + http.Redirect(w, r, redirectURI, http.StatusSeeOther) + }) + + go func() { + // cannot panic, because this probably is an intentional close + _ = srv.ListenAndServe() + }() + + // returning reference so caller can call Shutdown() + return srv +} diff --git a/pkg/platform/loftclient/config.go b/pkg/platform/loftclient/config.go new file mode 100644 index 000000000..0ebc14f37 --- /dev/null +++ b/pkg/platform/loftclient/config.go @@ -0,0 +1,63 @@ +package client + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Config defines the client config structure +type Config struct { + metav1.TypeMeta `json:",inline"` + + // host is the http endpoint of how to access loft + // +optional + Host string `json:"host,omitempty"` + + // LastInstallContext is the last install context + // +optional + LastInstallContext string `json:"lastInstallContext,omitempty"` + + // insecure specifies if the loft instance is insecure + // +optional + Insecure bool `json:"insecure,omitempty"` + + // access key is the access key for the given loft host + // +optional + AccessKey 
string `json:"accesskey,omitempty"` + + // virtual cluster access key is the access key for the given loft host to create virtual clusters + // +optional + VirtualClusterAccessKey string `json:"virtualClusterAccessKey,omitempty"` + + // DEPRECATED: do not use anymore + // the direct cluster endpoint token + // +optional + DirectClusterEndpointToken string `json:"directClusterEndpointToken,omitempty"` + + // DEPRECATED: do not use anymore + // last time the direct cluster endpoint token was requested + // +optional + DirectClusterEndpointTokenRequested *metav1.Time `json:"directClusterEndpointTokenRequested,omitempty"` + + // map of cached certificates for "access point" mode virtual clusters + // +optional + VirtualClusterAccessPointCertificates map[string]VirtualClusterCertificatesEntry +} + +type VirtualClusterCertificatesEntry struct { + CertificateData string + KeyData string + LastRequested metav1.Time + ExpirationTime time.Time +} + +// NewConfig creates a new config +func NewConfig() *Config { + return &Config{ + TypeMeta: metav1.TypeMeta{ + Kind: "Config", + APIVersion: "storage.loft.sh/v1", + }, + } +} diff --git a/pkg/platform/loftclient/helper/helper.go b/pkg/platform/loftclient/helper/helper.go new file mode 100644 index 000000000..ba5c9c46c --- /dev/null +++ b/pkg/platform/loftclient/helper/helper.go @@ -0,0 +1,1160 @@ +package helper + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + + "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" + "github.com/loft-sh/vcluster/pkg/platform/loftclient/naming" + authorizationv1 "k8s.io/api/authorization/v1" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" + "github.com/loft-sh/vcluster/pkg/platform/kube" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" + client "github.com/loft-sh/vcluster/pkg/platform/loftclient" + "github.com/mgutz/ansi" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/util/term" +) + +var errNoClusterAccess = errors.New("the user has no access to any cluster") + +type VirtualClusterInstanceProject struct { + VirtualCluster *managementv1.VirtualClusterInstance + Project *managementv1.Project +} + +type SpaceInstanceProject struct { + SpaceInstance *managementv1.SpaceInstance + Project *managementv1.Project +} + +func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultVirtualClusterTemplate != "" { + templateName = projectTemplates.DefaultVirtualClusterTemplate + } + + // try to find template + if templateName != "" { + for _, virtualClusterTemplate := range projectTemplates.VirtualClusterTemplates { + if virtualClusterTemplate.Name == templateName { + return &virtualClusterTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if 
len(projectTemplates.VirtualClusterTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed virtual cluster templates in project %s", projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 1 { + return &projectTemplates.VirtualClusterTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.VirtualClusterTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.VirtualClusterTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultSpaceTemplate != "" { + templateName = projectTemplates.DefaultSpaceTemplate + } + + // try to find template + if templateName != "" { + for _, spaceTemplate := range projectTemplates.SpaceTemplates { + if spaceTemplate.Name == templateName { + return &spaceTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.SpaceTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed space templates in project %s", projectName) + } else if len(projectTemplates.SpaceTemplates) == 1 { + return &projectTemplates.SpaceTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.SpaceTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.SpaceTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { + if clusterName != "" || spaceName != "" { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", "", err + } + + // gather projects and virtual cluster instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) 
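+ // a NotFound error is mapped below to a friendlier message, since it
+ // usually means the project is missing or not accessible for this user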
+ if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var virtualClusters []*VirtualClusterInstanceProject + for _, p := range projects { + if virtualClusterName != "" { + virtualClusterInstance, err := getProjectVirtualClusterInstance(ctx, managementClient, p, virtualClusterName) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, virtualClusterInstance) + } else { + projectVirtualClusters, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, projectVirtualClusters...) + } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, virtualCluster := range virtualClusters { + optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) + } + + // check if there are virtualclusters + if len(virtualClusters) == 0 { + if virtualClusterName != "" { + return "", "", "", "", fmt.Errorf("couldn't find or access virtual cluster %s", virtualClusterName) + } + return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") + } else if len(virtualClusters) == 1 { + return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil + } + } + + return "", "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { + if clusterName != "" { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", err + } + + // gather projects and space instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", err + } + + projects = append(projects, project) + } 
else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var spaces []*SpaceInstanceProject + for _, p := range projects { + if spaceName != "" { + spaceInstance, err := getProjectSpaceInstance(ctx, managementClient, p, spaceName) + if err != nil { + continue + } + + spaces = append(spaces, spaceInstance) + } else { + projectSpaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + continue + } + + spaces = append(spaces, projectSpaceInstances...) + } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, space := range spaces { + optionsUnformatted = append(optionsUnformatted, []string{"Space: " + clihelper.GetDisplayName(space.SpaceInstance.Name, space.SpaceInstance.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(space.Project.Name, space.Project.Spec.DisplayName)}) + } + + // check if there are spaces + if len(spaces) == 0 { + if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find or access space %s", spaceName) + } + return "", "", "", fmt.Errorf("couldn't find a space you have access to") + } else if len(spaces) == 1 { + return spaces[0].SpaceInstance.Spec.ClusterRef.Cluster, spaces[0].Project.Name, spaces[0].SpaceInstance.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return spaces[idx].SpaceInstance.Spec.ClusterRef.Cluster, spaces[idx].Project.Name, spaces[idx].SpaceInstance.Name, nil + } + } + + return "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { + if projectName != "" { + return clusterName, projectName, nil + } else if allowClusterOnly && clusterName != "" { + return clusterName, "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", "", err + } + + projectNames := []string{} + for _, project := range projectList.Items { + projectNames = append(projectNames, clihelper.GetDisplayName(project.Name, project.Spec.DisplayName)) + } + + if len(projectNames) == 0 { + cluster, err := SelectCluster(ctx, baseClient, log) + if err != nil { + if errors.Is(err, errNoClusterAccess) { + return "", "", fmt.Errorf("the user has no access to a project") + } + + return "", "", err + } + + return cluster, "", nil + } + + var selectedProject *managementv1.Project + if len(projectNames) == 1 { + selectedProject = &projectList.Items[0] + } else { + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a project to use", + DefaultValue: projectNames[0], + Options: projectNames, + }) + if err != nil { + return "", 
"", err + } + for idx, project := range projectList.Items { + if answer == clihelper.GetDisplayName(project.Name, project.Spec.DisplayName) { + selectedProject = &projectList.Items[idx] + } + } + if selectedProject == nil { + return "", "", fmt.Errorf("answer not found") + } + } + + if clusterName == "" { + clusterName, err = SelectProjectCluster(ctx, baseClient, selectedProject, log) + return clusterName, selectedProject.Name, err + } + + return clusterName, selectedProject.Name, nil +} + +// SelectCluster lets the user select a cluster +func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger) (string, error) { + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", err + } + + clusterNames := []string{} + for _, cluster := range clusterList.Items { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Items) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Items) == 1 { + return clusterList.Items[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + for _, cluster := range clusterList.Items { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectProjectCluster lets the user select a cluster from the project's allowed clusters +func SelectProjectCluster(ctx context.Context, baseClient client.Client, project *managementv1.Project, log log.Logger) (string, error) { + if !term.IsTerminal(os.Stdin) { + // Allow loft to schedule as before + return "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, project.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + anyClusterOption := "Any Cluster [Loft Selects Cluster]" + clusterNames := []string{} + for _, allowedCluster := range project.Spec.AllowedClusters { + if allowedCluster.Name == "*" { + clusterNames = append(clusterNames, anyClusterOption) + break + } + } + + for _, cluster := range clusterList.Clusters { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Clusters) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Clusters) == 1 { + return clusterList.Clusters[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + + if answer == anyClusterOption { + return "", nil + } + + for _, cluster := range clusterList.Clusters { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectUserOrTeam lets the user select an user or team in a cluster +func SelectUserOrTeam(ctx context.Context, baseClient client.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { + managementClient, err := 
baseClient.Management()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ clusterAccess, err := managementClient.Loft().ManagementV1().Clusters().ListAccess(ctx, clusterName, metav1.GetOptions{})
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var user *clusterv1.EntityInfo
+ if len(clusterAccess.Users) > 0 {
+ user = &clusterAccess.Users[0].Info
+ }
+
+ teams := []*clusterv1.EntityInfo{}
+ for _, team := range clusterAccess.Teams {
+ t := team
+ teams = append(teams, &t.Info)
+ }
+
+ if user == nil && len(teams) == 0 {
+ return nil, nil, fmt.Errorf("the user has no access to cluster %s", clusterName)
+ } else if user != nil && len(teams) == 0 {
+ return user, nil, nil
+ } else if user == nil && len(teams) == 1 {
+ return nil, teams[0], nil
+ }
+
+ names := []string{}
+ if user != nil {
+ names = append(names, "User "+clihelper.DisplayName(user))
+ }
+ for _, t := range teams {
+ names = append(names, "Team "+clihelper.DisplayName(t))
+ }
+
+ answer, err := log.Question(&survey.QuestionOptions{
+ Question: "Please choose a user or team to use",
+ DefaultValue: names[0],
+ Options: names,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if user != nil && "User "+clihelper.DisplayName(user) == answer {
+ return user, nil, nil
+ }
+ for _, t := range teams {
+ if "Team "+clihelper.DisplayName(t) == answer {
+ return nil, t, nil
+ }
+ }
+
+ return nil, nil, fmt.Errorf("answer not found")
+}
+
+type ClusterUserOrTeam struct {
+ Team bool
+ ClusterMember managementv1.ClusterMember
+}
+
+func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) {
+ if userName != "" && teamName != "" {
+ return nil, fmt.Errorf("team and user specified, please only choose one")
+ }
+
+ managementClient, err := baseClient.Management()
+ if err != nil {
+ return nil, err
+ }
+
+ members, err := managementClient.Loft().ManagementV1().Clusters().ListMembers(ctx, clusterName, metav1.GetOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("retrieve cluster members: %w", err)
+ }
+
+ matchedMembers := []ClusterUserOrTeam{}
+ optionsUnformatted := [][]string{}
+ for _, user := range members.Users {
+ if teamName != "" {
+ continue
+ } else if userName != "" && user.Info.Name != userName {
+ continue
+ }
+
+ matchedMembers = append(matchedMembers, ClusterUserOrTeam{
+ ClusterMember: user,
+ })
+ displayName := user.Info.DisplayName
+ if displayName == "" {
+ displayName = user.Info.Name
+ }
+
+ optionsUnformatted = append(optionsUnformatted, []string{"User: " + displayName, "Kube User: " + user.Info.Name})
+ }
+ for _, team := range members.Teams {
+ if userName != "" {
+ continue
+ } else if teamName != "" && team.Info.Name != teamName {
+ continue
+ }
+
+ matchedMembers = append(matchedMembers, ClusterUserOrTeam{
+ Team: true,
+ ClusterMember: team,
+ })
+ displayName := team.Info.DisplayName
+ if displayName == "" {
+ displayName = team.Info.Name
+ }
+
+ optionsUnformatted = append(optionsUnformatted, []string{"Team: " + displayName, "Kube Team: " + team.Info.Name})
+ }
+
+ questionOptions := formatOptions("%s | %s", optionsUnformatted)
+ if len(questionOptions) == 0 {
+ if userName == "" && teamName == "" {
+ return nil, fmt.Errorf("couldn't find any cluster member")
+ } else if userName != "" {
+ return nil, fmt.Errorf("couldn't find user %s in cluster %s", ansi.Color(userName, "white+b"), ansi.Color(clusterName, "white+b"))
+ }
+
+ return nil, fmt.Errorf("couldn't find team %s in cluster %s", ansi.Color(teamName, 
"white+b"), ansi.Color(clusterName, "white+b")) + } else if len(questionOptions) == 1 { + return &matchedMembers[0], nil + } + + selectedMember, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return nil, err + } + + for idx, s := range questionOptions { + if s == selectedMember { + return &matchedMembers[idx], nil + } + } + + return nil, fmt.Errorf("selected question option not found") +} + +func GetVirtualClusterInstances(ctx context.Context, baseClient client.Client) ([]*VirtualClusterInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retVClusters []*VirtualClusterInstanceProject + for _, project := range projectList.Items { + p := &project + + virtualClusterInstances, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retVClusters = append(retVClusters, virtualClusterInstances...) + } + + return retVClusters, nil +} + +func CanAccessProjectSecret(ctx context.Context, managementClient kube.Interface, namespace, name string) (bool, error) { + return CanAccessInstance(ctx, managementClient, namespace, name, "projectsecrets") +} + +func CanAccessInstance(ctx context.Context, managementClient kube.Interface, namespace, name string, resource string) (bool, error) { + selfSubjectAccessReview, err := managementClient.Loft().ManagementV1().SelfSubjectAccessReviews().Create(ctx, &managementv1.SelfSubjectAccessReview{ + Spec: managementv1.SelfSubjectAccessReviewSpec{ + SelfSubjectAccessReviewSpec: authorizationv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Verb: "use", + Group: managementv1.SchemeGroupVersion.Group, + Version: managementv1.SchemeGroupVersion.Version, + Resource: resource, + Namespace: namespace, + Name: name, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } else if !selfSubjectAccessReview.Status.Allowed || selfSubjectAccessReview.Status.Denied { + return false, nil + } + return true, nil +} + +func GetSpaceInstances(ctx context.Context, baseClient client.Client) ([]*SpaceInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retSpaces []*SpaceInstanceProject + for _, project := range projectList.Items { + p := &project + + spaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retSpaces = append(retSpaces, spaceInstances...) 
+ } + + return retSpaces, nil +} + +type ProjectProjectSecret struct { + ProjectSecret managementv1.ProjectSecret + Project string +} + +func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { + var projects []*managementv1.Project + if len(projectNames) == 0 { + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for idx := range projectList.Items { + projectItem := projectList.Items[idx] + projects = append(projects, &projectItem) + } + } else { + for _, projectName := range projectNames { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + projects = append(projects, project) + } + } + + var retSecrets []*ProjectProjectSecret + for _, project := range projects { + projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(naming.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, projectSecret := range projectSecrets.Items { + canAccess, err := CanAccessProjectSecret(ctx, managementClient, projectSecret.Namespace, projectSecret.Name) + if err != nil { + return nil, err + } else if !canAccess { + continue + } + + retSecrets = append(retSecrets, &ProjectProjectSecret{ + ProjectSecret: projectSecret, + Project: project.Name, + }) + } + } + + return retSecrets, nil +} + +type ClusterSpace struct { + clusterv1.Space + Cluster string +} + +// GetSpaces returns all spaces accessible by the user or team +func GetSpaces(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterSpace, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + spaceList := []ClusterSpace{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + spaces, err := clusterClient.Agent().ClusterV1().Spaces().List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving spaces from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, space := range spaces.Items { + spaceList = append(spaceList, ClusterSpace{ + Space: space, + Cluster: cluster.Name, + }) + } + } + sort.Slice(spaceList, func(i, j int) bool { + return spaceList[i].Name < spaceList[j].Name + }) + + return spaceList, nil +} + +type ClusterVirtualCluster struct { + clusterv1.VirtualCluster + Cluster string +} + +// GetVirtualClusters returns all virtual clusters the user has access to +func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterVirtualCluster, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + virtualClusterList := []ClusterVirtualCluster{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + virtualClusters, err := 
clusterClient.Agent().ClusterV1().VirtualClusters("").List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving virtual clusters from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, virtualCluster := range virtualClusters.Items { + virtualClusterList = append(virtualClusterList, ClusterVirtualCluster{ + VirtualCluster: virtualCluster, + Cluster: cluster.Name, + }) + } + } + sort.Slice(virtualClusterList, func(i, j int) bool { + return virtualClusterList[i].Name < virtualClusterList[j].Name + }) + + return virtualClusterList, nil +} + +// SelectSpaceAndClusterName selects a space and cluster name +func SelectSpaceAndClusterName(ctx context.Context, baseClient client.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { + spaces, err := GetSpaces(ctx, baseClient, log) + if err != nil { + return "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedSpaces := []ClusterSpace{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, space := range spaces { + if spaceName != "" && space.Space.Name != spaceName { + continue + } else if clusterName != "" && space.Cluster != clusterName { + continue + } else if len(matchedSpaces) > 20 { + break + } + + if isLoftContext && vCluster == "" && cluster == space.Cluster && namespace == space.Space.Name { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedSpaces = append(matchedSpaces, space) + spaceName := space.Space.Name + if space.Space.Annotations != nil && space.Space.Annotations["loft.sh/display-name"] != "" { + spaceName = space.Space.Annotations["loft.sh/display-name"] + " (" + spaceName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{spaceName, space.Cluster}) + } + + questionOptions := formatOptions("Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if spaceName == "" { + return "", "", fmt.Errorf("couldn't find any space") + } else if clusterName != "" { + return "", "", fmt.Errorf("couldn't find space %s in cluster %s", ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", fmt.Errorf("couldn't find space %s", ansi.Color(spaceName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedSpaces[0].Space.Name, matchedSpaces[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedSpaces[idx].Cluster + spaceName = matchedSpaces[idx].Space.Name + break + } + } + + return spaceName, clusterName, nil +} + +func GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*managementv1.UserInfo, *clusterv1.EntityInfo, error) { + self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("get self: %w", err) + } else if self.Status.User == nil && self.Status.Team == nil { + return nil, nil, fmt.Errorf("no user or team name returned") + } + + 
return self.Status.User, self.Status.Team, nil +} + +func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { + virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) + if err != nil { + return "", "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedVClusters := []ClusterVirtualCluster{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, virtualCluster := range virtualClusters { + if virtualClusterName != "" && virtualCluster.VirtualCluster.Name != virtualClusterName { + continue + } else if spaceName != "" && virtualCluster.VirtualCluster.Namespace != spaceName { + continue + } else if clusterName != "" && virtualCluster.Cluster != clusterName { + continue + } + + if isLoftContext && vCluster == virtualCluster.VirtualCluster.Name && cluster == virtualCluster.Cluster && namespace == virtualCluster.VirtualCluster.Namespace { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedVClusters = append(matchedVClusters, virtualCluster) + vClusterName := virtualCluster.VirtualCluster.Name + if virtualCluster.VirtualCluster.Annotations != nil && virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] != "" { + vClusterName = virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] + " (" + vClusterName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{vClusterName, virtualCluster.VirtualCluster.Namespace, virtualCluster.Cluster}) + } + + questionOptions := formatOptions("vCluster: %s | Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if virtualClusterName == "" { + return "", "", "", fmt.Errorf("couldn't find any virtual cluster") + } else if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if clusterName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", "", fmt.Errorf("couldn't find virtual cluster %s", ansi.Color(virtualClusterName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedVClusters[0].VirtualCluster.Name, matchedVClusters[0].VirtualCluster.Namespace, matchedVClusters[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster to use", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedVClusters[idx].Cluster + virtualClusterName = matchedVClusters[idx].VirtualCluster.Name + spaceName = matchedVClusters[idx].VirtualCluster.Namespace + break + } + } + + return virtualClusterName, spaceName, clusterName, nil +} + +func formatOptions(format string, options [][]string) []string { + if len(options) == 0 { + return []string{} + } + + columnLengths := make([]int, len(options[0])) + for _, row := range 
options { + for i, column := range row { + if len(column) > columnLengths[i] { + columnLengths[i] = len(column) + } + } + } + + retOptions := []string{} + for _, row := range options { + columns := []interface{}{} + for i := range row { + value := row[i] + if columnLengths[i] > len(value) { + value = value + strings.Repeat(" ", columnLengths[i]-len(value)) + } + + columns = append(columns, value) + } + + retOptions = append(retOptions, fmt.Sprintf(format, columns...)) + } + + return retOptions +} + +func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, spaceName string) (*SpaceInstanceProject, error) { + spaceInstance := &managementv1.SpaceInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(spaceName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstance) + if err != nil { + return nil, err + } + + if !spaceInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &SpaceInstanceProject{ + SpaceInstance: spaceInstance, + Project: project, + }, nil +} + +func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*SpaceInstanceProject, error) { + spaceInstanceList := &managementv1.SpaceInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstanceList) + if err != nil { + return nil, err + } + + var spaces []*SpaceInstanceProject + for _, spaceInstance := range spaceInstanceList.Items { + if !spaceInstance.Status.CanUse { + continue + } + + s := spaceInstance + spaces = append(spaces, &SpaceInstanceProject{ + SpaceInstance: &s, + Project: project, + }) + } + return spaces, nil +} + +func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, virtualClusterName string) (*VirtualClusterInstanceProject, error) { + virtualClusterInstance := &managementv1.VirtualClusterInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(virtualClusterName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(virtualClusterInstance) + if err != nil { + return nil, err + } + + if !virtualClusterInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &VirtualClusterInstanceProject{ + VirtualCluster: virtualClusterInstance, + Project: project, + }, nil +} + +func getProjectVirtualClusterInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*VirtualClusterInstanceProject, error) { + virtualClusterInstanceList := &managementv1.VirtualClusterInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). 
+		Into(virtualClusterInstanceList)
+	if err != nil {
+		return nil, err
+	}
+
+	var virtualClusters []*VirtualClusterInstanceProject
+	for _, virtualClusterInstance := range virtualClusterInstanceList.Items {
+		if !virtualClusterInstance.Status.CanUse {
+			continue
+		}
+
+		v := virtualClusterInstance
+		virtualClusters = append(virtualClusters, &VirtualClusterInstanceProject{
+			VirtualCluster: &v,
+			Project:        project,
+		})
+	}
+	return virtualClusters, nil
+}
diff --git a/pkg/platform/loftclient/naming/naming.go b/pkg/platform/loftclient/naming/naming.go
new file mode 100644
index 000000000..e2952bfe2
--- /dev/null
+++ b/pkg/platform/loftclient/naming/naming.go
@@ -0,0 +1,24 @@
+package naming
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"strings"
+)
+
+func ProjectNamespace(projectName string) string {
+	return "loft-p-" + projectName
+}
+
+func SafeConcatName(name ...string) string {
+	return SafeConcatNameMax(name, 63)
+}
+
+func SafeConcatNameMax(name []string, max int) string {
+	fullPath := strings.Join(name, "-")
+	if len(fullPath) > max {
+		digest := sha256.Sum256([]byte(fullPath))
+		return fullPath[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7]
+	}
+	return fullPath
+}
diff --git a/pkg/platform/loftconfig/variables.go b/pkg/platform/loftconfig/variables.go
new file mode 100644
index 000000000..de91b7bb7
--- /dev/null
+++ b/pkg/platform/loftconfig/variables.go
@@ -0,0 +1,21 @@
+package config
+
+import (
+	"os"
+	"time"
+)
+
+const (
+	defaultTimeout     = 10 * time.Minute
+	timeoutEnvVariable = "LOFT_TIMEOUT"
+)
+
+func Timeout() time.Duration {
+	if timeout := os.Getenv(timeoutEnvVariable); timeout != "" {
+		if parsedTimeout, err := time.ParseDuration(timeout); err == nil {
+			return parsedTimeout
+		}
+	}
+
+	return defaultTimeout
+}
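SafeConcatNameMax above is the usual Kubernetes-style name truncation: names longer than the limit (63 characters for most object names) keep a prefix and gain a 7-character slice of the full name's sha256 digest, so truncated names stay both short and deterministic. A standalone sketch of the same behavior (the lowercase helper and the inputs are illustrative, not part of the patch):

	package main

	import (
		"crypto/sha256"
		"encoding/hex"
		"fmt"
		"strings"
	)

	// safeConcatNameMax mirrors SafeConcatNameMax from naming.go above.
	func safeConcatNameMax(parts []string, max int) string {
		fullPath := strings.Join(parts, "-")
		if len(fullPath) > max {
			digest := sha256.Sum256([]byte(fullPath))
			// keep max-8 characters, then "-" plus 7 hex characters of the digest
			return fullPath[0:max-8] + "-" + hex.EncodeToString(digest[:])[0:7]
		}
		return fullPath
	}

	func main() {
		fmt.Println(safeConcatNameMax([]string{"loft", "default", "my-vcluster"}, 63)) // short names pass through unchanged
		long := safeConcatNameMax([]string{"loft", strings.Repeat("x", 70)}, 63)
		fmt.Println(long, len(long)) // exactly 63 characters, ending in a hash suffix
	}

Timeout in variables.go is similarly defensive: a missing or unparsable LOFT_TIMEOUT silently falls back to the 10-minute default rather than failing startup.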
diff --git a/pkg/platform/loftutils/positional_args.go b/pkg/platform/loftutils/positional_args.go
new file mode 100644
index 000000000..08f9f23fa
--- /dev/null
+++ b/pkg/platform/loftutils/positional_args.go
@@ -0,0 +1,69 @@
+package util
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	SpaceNameOnlyUseLine   string
+	SpaceNameOnlyValidator cobra.PositionalArgs
+
+	VClusterNameOnlyUseLine string
+
+	VClusterNameOnlyValidator cobra.PositionalArgs
+)
+
+func init() {
+	SpaceNameOnlyUseLine, SpaceNameOnlyValidator = NamedPositionalArgsValidator(true, true, "SPACE_NAME")
+	VClusterNameOnlyUseLine, VClusterNameOnlyValidator = NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME")
+}
+
+// NamedPositionalArgsValidator returns a cobra.PositionalArgs that returns a helpful
+// error message if the arg number doesn't match.
+// It also returns a string that can be appended to the cobra use line.
+//
+// Example output for extra arguments:
+//
+//	$ command arg asdf
+//	[fatal]  command ARG_1 [flags]
+//	Invalid Args: received 2 arguments, expected 1, extra arguments: "asdf"
+//	Run with --help for more details
+//
+// Example output for missing arguments:
+//
+//	$ command
+//	[fatal]  command ARG_1 [flags]
+//	Invalid Args: received 0 arguments, expected 1, please specify missing: "ARG_1"
+//	Run with --help for more details on arguments
+func NamedPositionalArgsValidator(failMissing, failExtra bool, expectedArgs ...string) (string, cobra.PositionalArgs) {
+	return " " + strings.Join(expectedArgs, " "), func(cmd *cobra.Command, args []string) error {
+		numExpectedArgs := len(expectedArgs)
+		numArgs := len(args)
+		numMissing := numExpectedArgs - numArgs
+
+		if numMissing == 0 {
+			return nil
+		}
+
+		// didn't receive as many arguments as expected
+		if numMissing > 0 && failMissing {
+			// the last numMissing expectedArgs
+			missingKeys := strings.Join(expectedArgs[len(expectedArgs)-(numMissing):], ", ")
+			return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, please specify missing: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, missingKeys)
+		}
+
+		// received more than expected
+		if numMissing < 0 && failExtra {
+			// numMissing is negative here, so flip the sign to count the extras
+			numExtra := -numMissing
+			// the last numExtra args
+			extraValues := strings.Join(args[len(args)-numExtra:], ", ")
+			return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, extra arguments: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, extraValues)
+		}
+
+		return nil
+	}
+}
diff --git a/pkg/platform/loftutils/positional_args_test.go b/pkg/platform/loftutils/positional_args_test.go
new file mode 100644
index 000000000..ac45cb4d1
--- /dev/null
+++ b/pkg/platform/loftutils/positional_args_test.go
@@ -0,0 +1,55 @@
+package util
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/spf13/cobra"
+	"gotest.tools/v3/assert"
+)
+
+func TestNamedPositionalArgsValidator(t *testing.T) {
+	// loop through a generated variety of inputs: arg counts, expected arg counts, failMissing and failExtra
+	// since the cases depend on the numbers, it's easier to loop than to write a test table
+	maxExpectedArgCount := 5
+	maxActualArgsCount := maxExpectedArgCount + 5
+	expectedArgs := []string{}
+	testNum := 0
+	// loop through maxExpectedArgCount lengths of expectedArgs
+	for len(expectedArgs) <= maxExpectedArgCount {
+		actualArgs := []string{}
+		// loop through maxActualArgsCount lengths of actualArgs
+		for len(actualArgs) <= maxActualArgsCount {
+			defer func() {
+				panicErr := recover()
+				if panicErr != nil {
+					t.Fatalf("this function should never panic: %+v", panicErr)
+				}
+			}()
+			testNum++
+			// loop through both values of failMissing and failExtra
+			for _, failMissing := range []bool{true, false} {
+				for _, failExtra := range []bool{true, false} {
+					// execute test
+					t.Logf("running test #%d with failMissing %v, failExtra %v, expectedArgs: %q, args: %q", testNum, failMissing, failExtra, expectedArgs, actualArgs)
+					// if testNum == 23 {
+					// 	t.Log("focus a test number for debugging")
+					// }
+					_, validator := NamedPositionalArgsValidator(failMissing, failExtra, expectedArgs...)
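+					// The assertions below mirror the validator's contract:
+					// extra arguments error only when failExtra is set, missing
+					// arguments only when failMissing is set, and an exact match
+					// never errors.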
+ err := validator(&cobra.Command{}, actualArgs) + if len(actualArgs) > len(expectedArgs) && failExtra { + assert.ErrorContains(t, err, "extra arguments:", "expect error to not be nil as arg count is mismatched") + } else if len(actualArgs) < len(expectedArgs) && failMissing { + assert.ErrorContains(t, err, "please specify missing:", "expect error to not be nil as arg count is mismatched") + } else { + assert.NilError(t, err, "expect error to be nil as all args provided and no extra") + } + // append to actual args + actualArgs = append(actualArgs, fmt.Sprintf("ARG_%d", len(actualArgs))) + } + } + } + // append to expected args + expectedArgs = append(expectedArgs, fmt.Sprintf("ARG_NAME_%d", len(expectedArgs))) + } +} diff --git a/pkg/platform/loftutils/util.go b/pkg/platform/loftutils/util.go new file mode 100644 index 000000000..bda639922 --- /dev/null +++ b/pkg/platform/loftutils/util.go @@ -0,0 +1,26 @@ +package util + +import ( + "errors" + + kerrors "k8s.io/apimachinery/pkg/api/errors" +) + +func GetCause(err error) string { + if err == nil { + return "" + } + + var statusErr *kerrors.StatusError + + if errors.As(err, &statusErr) { + details := statusErr.Status().Details + if details != nil && len(details.Causes) > 0 { + return details.Causes[0].Message + } + + return statusErr.Error() + } + + return err.Error() +} From c9759cced882871a35b1340d1b9bc4895560a115 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 10:48:38 +0200 Subject: [PATCH 02/17] removed platform leftovers from temp work --- pkg/platform/clihelper/clihelper.go | 773 ----------- pkg/platform/defaults/defaults.go | 111 -- pkg/platform/kube/client.go | 54 - pkg/platform/kubeconfig/kubeconfig.go | 266 ---- pkg/platform/loftclient/client.go | 627 --------- pkg/platform/loftclient/config.go | 63 - pkg/platform/loftclient/helper/helper.go | 1160 ----------------- pkg/platform/loftclient/naming/naming.go | 24 - pkg/platform/loftconfig/variables.go | 21 - pkg/platform/loftutils/positional_args.go | 69 - .../loftutils/positional_args_test.go | 55 - pkg/platform/loftutils/util.go | 26 - 12 files changed, 3249 deletions(-) delete mode 100644 pkg/platform/clihelper/clihelper.go delete mode 100644 pkg/platform/defaults/defaults.go delete mode 100644 pkg/platform/kube/client.go delete mode 100644 pkg/platform/kubeconfig/kubeconfig.go delete mode 100644 pkg/platform/loftclient/client.go delete mode 100644 pkg/platform/loftclient/config.go delete mode 100644 pkg/platform/loftclient/helper/helper.go delete mode 100644 pkg/platform/loftclient/naming/naming.go delete mode 100644 pkg/platform/loftconfig/variables.go delete mode 100644 pkg/platform/loftutils/positional_args.go delete mode 100644 pkg/platform/loftutils/positional_args_test.go delete mode 100644 pkg/platform/loftutils/util.go diff --git a/pkg/platform/clihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go deleted file mode 100644 index 254e7ce79..000000000 --- a/pkg/platform/clihelper/clihelper.go +++ /dev/null @@ -1,773 +0,0 @@ -package clihelper - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "os/exec" - "path" - "sort" - "strconv" - "strings" - "time" - - clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" - storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/pkg/httputil" - "github.com/sirupsen/logrus" - - jsonpatch "github.com/evanphx/json-patch" - loftclientset 
"github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" - "github.com/loft-sh/loftctl/v4/pkg/config" - "github.com/loft-sh/loftctl/v4/pkg/portforward" - "github.com/loft-sh/log" - "github.com/loft-sh/log/survey" - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/transport/spdy" - "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" -) - -// CriticalStatus container status -var CriticalStatus = map[string]bool{ - "Error": true, - "Unknown": true, - "ImagePullBackOff": true, - "CrashLoopBackOff": true, - "RunContainerError": true, - "ErrImagePull": true, - "CreateContainerConfigError": true, - "InvalidImageName": true, -} - -const defaultReleaseName = "loft" - -const LoftRouterDomainSecret = "loft-router-domain" - -var defaultDeploymentName = "loft" - -func GetDisplayName(name string, displayName string) string { - if displayName != "" { - return displayName - } - - return name -} - -func GetTableDisplayName(name string, displayName string) string { - if displayName != "" && displayName != name { - return displayName + " (" + name + ")" - } - - return name -} - -func DisplayName(entityInfo *clusterv1.EntityInfo) string { - if entityInfo == nil { - return "" - } else if entityInfo.DisplayName != "" { - return entityInfo.DisplayName - } else if entityInfo.Username != "" { - return entityInfo.Username - } - - return entityInfo.Name -} - -func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { - ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) - if err != nil { - ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) - if err != nil { - return "", err - } - // find host - if len(ingress.Spec.Rules) > 0 { - return ingress.Spec.Rules[0].Host, nil - } - } else { - // find host - if len(ingress.Spec.Rules) > 0 { - return ingress.Spec.Rules[0].Host, nil - } - } - - return "", fmt.Errorf("couldn't find any host in loft ingress '%s/loft-ingress', please make sure you have not changed any deployed resources", namespace) -} - -func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, namespace string, log log.Logger) (*corev1.Pod, error) { - // wait until we have a running loft pod - now := time.Now() - pod := &corev1.Pod{} - err := wait.PollUntilContextTimeout(ctx, time.Second*2, config.Timeout(), true, func(ctx context.Context) (bool, error) { - pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ - LabelSelector: "app=loft", - }) - if err != nil { - log.Warnf("Error trying to retrieve %s pod: %v", product.DisplayName(), err) - return false, nil - } else if len(pods.Items) == 0 { - if time.Now().After(now.Add(time.Second * 10)) { - log.Infof("Still waiting for a %s pod...", product.DisplayName()) - now = time.Now() - } - return false, nil - } - - sort.Slice(pods.Items, func(i, j int) bool { - return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) - }) - - loftPod := &pods.Items[0] - found := false - for _, containerStatus := range loftPod.Status.ContainerStatuses { - if containerStatus.State.Running != nil && containerStatus.Ready { - if containerStatus.Name == "manager" { - found = 
true - } - - continue - } else if containerStatus.State.Terminated != nil || (containerStatus.State.Waiting != nil && CriticalStatus[containerStatus.State.Waiting.Reason]) { - reason := "" - message := "" - if containerStatus.State.Terminated != nil { - reason = containerStatus.State.Terminated.Reason - message = containerStatus.State.Terminated.Message - } else if containerStatus.State.Waiting != nil { - reason = containerStatus.State.Waiting.Reason - message = containerStatus.State.Waiting.Message - } - - out, err := kubeClient.CoreV1().Pods(namespace).GetLogs(loftPod.Name, &corev1.PodLogOptions{ - Container: "manager", - }).Do(context.Background()).Raw() - if err != nil { - return false, fmt.Errorf("there seems to be an issue with %s starting up: %s (%s). Please reach out to our support at https://loft.sh/", product.DisplayName(), message, reason) - } - if strings.Contains(string(out), "register instance: Post \"https://license.loft.sh/register\": dial tcp") { - return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up. Looks like you try to install %[1]s into an air-gapped environment, please reach out to our support at https://loft.sh/ for an offline license", product.DisplayName(), string(out)) - } - - return false, fmt.Errorf("%[1]s logs: \n%v \nThere seems to be an issue with %[1]s starting up: %[2]s (%[3]s). Please reach out to our support at https://loft.sh/", product.DisplayName(), string(out), message, reason) - } else if containerStatus.State.Waiting != nil && time.Now().After(now.Add(time.Second*10)) { - if containerStatus.State.Waiting.Message != "" { - log.Infof("Please keep waiting, %s container is still starting up: %s (%s)", product.DisplayName(), containerStatus.State.Waiting.Message, containerStatus.State.Waiting.Reason) - } else if containerStatus.State.Waiting.Reason != "" { - log.Infof("Please keep waiting, %s container is still starting up: %s", product.DisplayName(), containerStatus.State.Waiting.Reason) - } else { - log.Infof("Please keep waiting, %s container is still starting up...", product.DisplayName()) - } - - now = time.Now() - } - - return false, nil - } - - pod = loftPod - return found, nil - }) - if err != nil { - return nil, err - } - - return pod, nil -} - -func StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) { - log.WriteString(logrus.InfoLevel, "\n") - log.Infof("Starting port-forwarding to the %s pod", product.DisplayName()) - execRequest := client.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(pod.Name). - Namespace(pod.Namespace). 
- SubResource("portforward") - - t, upgrader, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - - dialer := spdy.NewDialer(upgrader, &http.Client{Transport: t}, "POST", execRequest.URL()) - errChan := make(chan error) - readyChan := make(chan struct{}) - stopChan := make(chan struct{}) - targetPort := getPortForwardingTargetPort(pod) - forwarder, err := portforward.New(dialer, []string{localPort + ":" + strconv.Itoa(targetPort)}, stopChan, readyChan, errChan, io.Discard, io.Discard) - if err != nil { - return nil, err - } - - go func() { - err := forwarder.ForwardPorts(ctx) - if err != nil { - errChan <- err - } - }() - - // wait till ready - select { - case err = <-errChan: - return nil, err - case <-readyChan: - case <-stopChan: - return nil, fmt.Errorf("stopped before ready") - } - - // start watcher - go func() { - for { - select { - case <-stopChan: - return - case err = <-errChan: - log.Infof("error during port forwarder: %v", err) - close(stopChan) - return - } - } - }() - - return stopChan, nil -} - -func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { - loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - loftNamespace, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - }, metav1.CreateOptions{}) - if err != nil { - return "", err - } - - return string(loftNamespace.UID), nil - } - - return "", err - } - - return string(loftNamespace.UID), nil -} - -type version struct { - Version string `json:"version"` -} - -func IsLoftReachable(ctx context.Context, host string) (bool, error) { - // wait until loft is reachable at the given url - client := &http.Client{ - Transport: httputil.InsecureTransport(), - } - url := "https://" + host + "/version" - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return false, fmt.Errorf("error creating request with context: %w", err) - } - resp, err := client.Do(req) - if err == nil && resp.StatusCode == http.StatusOK { - out, err := io.ReadAll(resp.Body) - if err != nil { - return false, nil - } - - v := &version{} - err = json.Unmarshal(out, v) - if err != nil { - return false, fmt.Errorf("error decoding response from %s: %w. Try running '%s --reset'", url, err, product.StartCmd()) - } else if v.Version == "" { - return false, fmt.Errorf("unexpected response from %s: %s. 
Try running '%s --reset'", url, string(out), product.StartCmd()) - } - - return true, nil - } - - return false, nil -} - -func IsLocalCluster(host string, log log.Logger) bool { - url, err := url.Parse(host) - if err != nil { - log.Warnf("Couldn't parse kube context host url: %v", err) - return false - } - - hostname := url.Hostname() - ip := net.ParseIP(hostname) - if ip != nil { - if IsPrivateIP(ip) { - return true - } - } - - if hostname == "localhost" || strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".localhost") { - return true - } - - return false -} - -var privateIPBlocks []*net.IPNet - -func init() { - for _, cidr := range []string{ - "127.0.0.0/8", // IPv4 loopback - "10.0.0.0/8", // RFC1918 - "172.16.0.0/12", // RFC1918 - "192.168.0.0/16", // RFC1918 - "::1/128", // IPv6 loopback - "fe80::/10", // IPv6 link-local - "fc00::/7", // IPv6 unique local addr - } { - _, block, _ := net.ParseCIDR(cidr) - privateIPBlocks = append(privateIPBlocks, block) - } -} - -// IsPrivateIP checks if a given ip is private -func IsPrivateIP(ip net.IP) bool { - for _, block := range privateIPBlocks { - if block.Contains(ip) { - return true - } - } - - return false -} - -func EnterHostNameQuestion(log log.Logger) (string, error) { - return log.Question(&survey.QuestionOptions{ - Question: fmt.Sprintf("Enter a hostname for your %s instance (e.g. loft.my-domain.tld): \n ", product.DisplayName()), - ValidationFunc: func(answer string) error { - u, err := url.Parse("https://" + answer) - if err != nil || u.Path != "" || u.Port() != "" || len(strings.Split(answer, ".")) < 2 { - return fmt.Errorf("please enter a valid hostname without protocol (https://), without path and without port, e.g. loft.my-domain.tld") - } - return nil - }, - }) -} - -func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { - _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return false, nil - } - - return false, fmt.Errorf("error accessing kubernetes cluster: %w", err) - } - - return true, nil -} - -func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { - log.Infof("Uninstalling %s...", product.DisplayName()) - releaseName := defaultReleaseName - deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } else if deploy != nil && deploy.Labels != nil && deploy.Labels["release"] != "" { - releaseName = deploy.Labels["release"] - } - - args := []string{ - "uninstall", - releaseName, - "--kube-context", - kubeContext, - "--namespace", - namespace, - } - log.Infof("Executing command: helm %s", strings.Join(args, " ")) - output, err := exec.Command("helm", args...).CombinedOutput() - if err != nil { - log.Errorf("error during helm command: %s (%v)", string(output), err) - } - - // we also cleanup the validating webhook configuration and apiservice - apiRegistrationClient, err := clientset.NewForConfig(restConfig) - if err != nil { - return err - } - - err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.management.loft.sh", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = deleteUser(ctx, restConfig, "admin") - if err != nil { - return err - } - - err = 
kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), "loft-user-secret-admin", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), LoftRouterDomainSecret, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - // we also cleanup the validating webhook configuration and apiservice - err = kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "loft-agent", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.tenancy.kiosk.sh", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.cluster.loft.sh", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-agent-controller", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-applied-defaults", metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - log.WriteString(logrus.InfoLevel, "\n") - log.Done(product.Replace("Successfully uninstalled Loft")) - log.WriteString(logrus.InfoLevel, "\n") - - return nil -} - -func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error { - loftClient, err := loftclientset.NewForConfig(restConfig) - if err != nil { - return err - } - - user, err := loftClient.StorageV1().Users().Get(ctx, name, metav1.GetOptions{}) - if err != nil { - return nil - } else if len(user.Finalizers) > 0 { - user.Finalizers = nil - _, err = loftClient.StorageV1().Users().Update(ctx, user, metav1.UpdateOptions{}) - if err != nil { - if kerrors.IsConflict(err) { - return deleteUser(ctx, restConfig, name) - } - - return err - } - } - - err = loftClient.StorageV1().Users().Delete(ctx, name, metav1.DeleteOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } - - return nil -} - -func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error { - // first create an ingress controller - const ( - YesOption = "Yes" - NoOption = "No, I already have an ingress controller installed." - ) - - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Ingress controller required. Should the nginx-ingress controller be installed?", - DefaultValue: YesOption, - Options: []string{ - YesOption, - NoOption, - }, - }) - if err != nil { - return err - } - - if answer == YesOption { - args := []string{ - "install", - "ingress-nginx", - "ingress-nginx", - "--repository-config=''", - "--repo", - "https://kubernetes.github.io/ingress-nginx", - "--kube-context", - kubeContext, - "--namespace", - "ingress-nginx", - "--create-namespace", - "--set-string", - "controller.config.hsts=false", - "--wait", - } - log.WriteString(logrus.InfoLevel, "\n") - log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) - log.Info("Waiting for ingress controller deployment, this can take several minutes...") - helmCmd := exec.Command("helm", args...) 
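Throughout this (since removed) helper file, helm is driven by shelling out and folding CombinedOutput into the returned error, rather than parsing exit codes or using an SDK. A minimal sketch of that pattern, runnable wherever a helm binary is on PATH (the subcommand is illustrative):

	package main

	import (
		"fmt"
		"log"
		"os/exec"
	)

	func main() {
		// CombinedOutput captures stdout and stderr together, so a failing helm
		// invocation carries its own diagnostics into the error message below.
		out, err := exec.Command("helm", "version", "--short").CombinedOutput()
		if err != nil {
			log.Fatalf("error during helm command: %s (%v)", string(out), err)
		}
		fmt.Print(string(out))
	}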
- output, err := helmCmd.CombinedOutput() - if err != nil { - return fmt.Errorf("error during helm command: %s (%w)", string(output), err) - } - - list, err := kubeClient.CoreV1().Secrets("ingress-nginx").List(ctx, metav1.ListOptions{ - LabelSelector: "name=ingress-nginx,owner=helm,status=deployed", - }) - if err != nil { - return err - } - - if len(list.Items) == 1 { - secret := list.Items[0] - originalSecret := secret.DeepCopy() - secret.Labels["loft.sh/app"] = "true" - if secret.Annotations == nil { - secret.Annotations = map[string]string{} - } - - secret.Annotations["loft.sh/url"] = "https://kubernetes.github.io/ingress-nginx" - originalJSON, err := json.Marshal(originalSecret) - if err != nil { - return err - } - modifiedJSON, err := json.Marshal(secret) - if err != nil { - return err - } - data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) - if err != nil { - return err - } - _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, types.MergePatchType, data, metav1.PatchOptions{}) - if err != nil { - return err - } - } - - log.Done("Successfully installed ingress-nginx to your kubernetes cluster!") - } - - return nil -} - -func UpgradeLoft(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, log log.Logger) error { - // now we install loft - args := []string{ - "upgrade", - defaultReleaseName, - chartName, - "--install", - "--reuse-values", - "--create-namespace", - "--repository-config=''", - "--kube-context", - kubeContext, - "--namespace", - namespace, - } - if chartRepo != "" { - args = append(args, "--repo", chartRepo) - } - args = append(args, extraArgs...) - - log.WriteString(logrus.InfoLevel, "\n") - log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) - log.Info("Waiting for helm command, this can take up to several minutes...") - helmCmd := exec.Command("helm", args...) - if chartRepo != "" { - helmWorkDir, err := getHelmWorkdir(chartName) - if err != nil { - return err - } - - helmCmd.Dir = helmWorkDir - } - output, err := helmCmd.CombinedOutput() - if err != nil { - return fmt.Errorf("error during helm command: %s (%w)", string(output), err) - } - - log.Donef("%s has been deployed to your cluster!", product.DisplayName()) - return nil -} - -func GetLoftManifests(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, _ log.Logger) (string, error) { - args := []string{ - "template", - defaultReleaseName, - chartName, - "--repository-config=''", - "--kube-context", - kubeContext, - "--namespace", - namespace, - } - if chartRepo != "" { - args = append(args, "--repo", chartRepo) - } - args = append(args, extraArgs...) - - helmCmd := exec.Command("helm", args...) 
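UpgradeLoft above wraps helm upgrade --install --reuse-values, so repeated invocations converge on the desired release state instead of failing once the release exists. A hypothetical call (the kube context, repo URL, and extra value flag are illustrative, not from the patch):

	// assuming this package's imports plus a log.Logger named logger
	extraArgs := []string{"--set", "replicaCount=1"} // illustrative helm value
	if err := UpgradeLoft("loft", "https://charts.loft.sh", "my-context", "loft", extraArgs, logger); err != nil {
		return fmt.Errorf("upgrade loft: %w", err)
	}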
- if chartRepo != "" { - helmWorkDir, err := getHelmWorkdir(chartName) - if err != nil { - return "", err - } - - helmCmd.Dir = helmWorkDir - } - output, err := helmCmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("error during helm command: %s (%w)", string(output), err) - } - return string(output), nil -} - -// Return the directory where the `helm` commands should be executed or error if none can be found/created -// Uses current workdir by default unless it contains a folder with the chart name -func getHelmWorkdir(chartName string) (string, error) { - // If chartName folder exists, check temp dir next - if _, err := os.Stat(chartName); err == nil { - tempDir := os.TempDir() - - // If tempDir/chartName folder exists, create temp folder - if _, err := os.Stat(path.Join(tempDir, chartName)); err == nil { - tempDir, err = os.MkdirTemp(tempDir, chartName) - if err != nil { - return "", errors.New("problematic directory `" + chartName + "` found: please execute command in a different folder") - } - } - - // Use tempDir - return tempDir, nil - } - - // Use current workdir - return "", nil -} - -// Makes sure that admin user and password secret exists -// Returns (true, nil) if everything is correct but password is different from parameter `password` -func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) { - loftClient, err := loftclientset.NewForConfig(restConfig) - if err != nil { - return false, err - } - - admin, err := loftClient.StorageV1().Users().Get(ctx, "admin", metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return false, err - } else if admin == nil { - admin, err = loftClient.StorageV1().Users().Create(ctx, &storagev1.User{ - ObjectMeta: metav1.ObjectMeta{ - Name: "admin", - }, - Spec: storagev1.UserSpec{ - Username: "admin", - Email: "test@domain.tld", - Subject: "admin", - Groups: []string{"system:masters"}, - PasswordRef: &storagev1.SecretRef{ - SecretName: "loft-user-secret-admin", - SecretNamespace: "loft", - Key: "password", - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return false, err - } - } else if admin.Spec.PasswordRef == nil || admin.Spec.PasswordRef.SecretName == "" || admin.Spec.PasswordRef.SecretNamespace == "" { - return false, nil - } - - key := admin.Spec.PasswordRef.Key - if key == "" { - key = "password" - } - - passwordHash := fmt.Sprintf("%x", sha256.Sum256([]byte(password))) - - secret, err := kubeClient.CoreV1().Secrets(admin.Spec.PasswordRef.SecretNamespace).Get(ctx, admin.Spec.PasswordRef.SecretName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return false, err - } else if err == nil { - existingPasswordHash, keyExists := secret.Data[key] - if keyExists { - return (string(existingPasswordHash) != passwordHash), nil - } - - secret.Data[key] = []byte(passwordHash) - _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) - if err != nil { - return false, errors.Wrap(err, "update admin password secret") - } - return false, nil - } - - // create the password secret if it was not found, this can happen if you delete the loft namespace without deleting the admin user - secret = &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: admin.Spec.PasswordRef.SecretName, - Namespace: admin.Spec.PasswordRef.SecretNamespace, - }, - Data: map[string][]byte{ - key: []byte(passwordHash), - }, - } - _, err = 
kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) - if err != nil { - return false, errors.Wrap(err, "create admin password secret") - } - - log.Info("Successfully recreated admin password secret") - return false, nil -} - -func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool { - _, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - _, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) - return kerrors.IsNotFound(err) - } - - return kerrors.IsNotFound(err) -} - -func getPortForwardingTargetPort(pod *corev1.Pod) int { - for _, container := range pod.Spec.Containers { - if container.Name == "manager" { - for _, port := range container.Ports { - if port.Name == "https" { - return int(port.ContainerPort) - } - } - } - } - - return 10443 -} diff --git a/pkg/platform/defaults/defaults.go b/pkg/platform/defaults/defaults.go deleted file mode 100644 index 7b7fdd945..000000000 --- a/pkg/platform/defaults/defaults.go +++ /dev/null @@ -1,111 +0,0 @@ -package platformdefaults - -import ( - "encoding/json" - "os" - "path/filepath" - - "github.com/loft-sh/loftctl/v4/pkg/client" - "github.com/pkg/errors" -) - -const ( - KeyProject = "project" -) - -var ( - ConfigFile = "defaults.json" - ConfigFolder = client.CacheFolder - - DefaultKeys = []string{KeyProject} -) - -// Defaults holds the default values -type Defaults struct { - folderPath string - fileName string - fullPath string - - values map[string]string -} - -// NewFromPath creates a new defaults instance from the given path -func NewFromPath(folderPath string, fileName string) (*Defaults, error) { - fullPath := filepath.Join(folderPath, fileName) - defaults := &Defaults{folderPath, fileName, fullPath, make(map[string]string)} - - if err := defaults.ensureConfigFile(); err != nil { - return defaults, errors.Wrap(err, "no config file") - } - - contents, err := os.ReadFile(fullPath) - if err != nil { - return defaults, errors.Wrap(err, "read config file") - } - if len(contents) == 0 { - return defaults, nil - } - if err = json.Unmarshal(contents, &defaults.values); err != nil { - return defaults, errors.Wrap(err, "invalid json") - } - - return defaults, nil -} - -// Set sets the given key to the given value and persists the defaults on disk -func (d *Defaults) Set(key string, value string) error { - if !IsSupportedKey(key) { - return errors.Errorf("key %s is not supported", key) - } - - d.values[key] = value - json, err := json.Marshal(d.values) - if err != nil { - return errors.Wrap(err, "invalid json") - } - if err = os.WriteFile(d.fullPath, json, os.ModePerm); err != nil { - return errors.Wrap(err, "write config file") - } - - return nil -} - -// Get returns the value for the given key -func (d *Defaults) Get(key string, fallback string) (string, error) { - if !IsSupportedKey(key) { - return fallback, errors.Errorf("key %s is not supported", key) - } - - return d.values[key], nil -} - -// IsSupportedKey returns true if the given key is supported -func IsSupportedKey(key string) bool { - for _, k := range DefaultKeys { - if k == key { - return true - } - } - - return false -} - -func (d *Defaults) ensureConfigFile() error { - _, err := os.Stat(d.fullPath) - // file exists - if err == nil { - return nil - } - - if os.IsNotExist(err) { - if err := os.MkdirAll(d.folderPath, os.ModePerm); err != nil { - return 
errors.Wrap(err, "create cache folder") - } - if _, err := os.Create(d.fullPath); err != nil { - return errors.Wrap(err, "create defaults file") - } - - return nil - } - return err -} diff --git a/pkg/platform/kube/client.go b/pkg/platform/kube/client.go deleted file mode 100644 index 21699183b..000000000 --- a/pkg/platform/kube/client.go +++ /dev/null @@ -1,54 +0,0 @@ -package kube - -import ( - agentloftclient "github.com/loft-sh/agentapi/v4/pkg/client/loft/clientset_generated/clientset" - loftclient "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" - - "github.com/pkg/errors" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -type Interface interface { - kubernetes.Interface - Loft() loftclient.Interface - Agent() agentloftclient.Interface -} - -func NewForConfig(c *rest.Config) (Interface, error) { - kubeClient, err := kubernetes.NewForConfig(c) - if err != nil { - return nil, errors.Wrap(err, "create kube client") - } - - loftClient, err := loftclient.NewForConfig(c) - if err != nil { - return nil, errors.Wrap(err, "create loft client") - } - - agentLoftClient, err := agentloftclient.NewForConfig(c) - if err != nil { - return nil, errors.Wrap(err, "create kiosk client") - } - - return &client{ - Interface: kubeClient, - loftClient: loftClient, - agentLoftClient: agentLoftClient, - }, nil -} - -type client struct { - kubernetes.Interface - loftClient loftclient.Interface - agentLoftClient agentloftclient.Interface -} - -func (c *client) Loft() loftclient.Interface { - return c.loftClient -} - -func (c *client) Agent() agentloftclient.Interface { - return c.agentLoftClient -} diff --git a/pkg/platform/kubeconfig/kubeconfig.go b/pkg/platform/kubeconfig/kubeconfig.go deleted file mode 100644 index 602ecf63e..000000000 --- a/pkg/platform/kubeconfig/kubeconfig.go +++ /dev/null @@ -1,266 +0,0 @@ -package kubeconfig - -import ( - "io" - "os" - "path/filepath" - "strings" - - "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/clientcmd/api" -) - -type ContextOptions struct { - Name string - Server string - CaData []byte - ConfigPath string - InsecureSkipTLSVerify bool - DirectClusterEndpointEnabled bool - VirtualClusterAccessPointEnabled bool - - Token string - ClientKeyData []byte - ClientCertificateData []byte - - CurrentNamespace string - SetActive bool -} - -func SpaceInstanceContextName(projectName, spaceInstanceName string) string { - return "loft_" + spaceInstanceName + "_" + projectName -} - -func VirtualClusterInstanceContextName(projectName, virtualClusterInstance string) string { - return "loft-vcluster_" + virtualClusterInstance + "_" + projectName -} - -func virtualClusterInstanceProjectAndNameFromContextName(contextName string) (string, string) { - return strings.Split(contextName, "_")[2], strings.Split(contextName, "_")[1] -} - -func SpaceContextName(clusterName, namespaceName string) string { - contextName := "loft_" - if namespaceName != "" { - contextName += namespaceName + "_" - } - - contextName += clusterName - return contextName -} - -func VirtualClusterContextName(clusterName, namespaceName, virtualClusterName string) string { - return "loft-vcluster_" + virtualClusterName + "_" + namespaceName + "_" + clusterName -} - -func ManagementContextName() string { - return "loft-management" -} - -func ParseContext(contextName string) (isLoftContext bool, cluster string, namespace string, vCluster string) { - splitted := strings.Split(contextName, 
"_") - if len(splitted) == 0 || (splitted[0] != "loft" && splitted[0] != "loft-vcluster") { - return false, "", "", "" - } - - // cluster or space context - if splitted[0] == "loft" { - if len(splitted) > 3 || len(splitted) == 1 { - return false, "", "", "" - } else if len(splitted) == 2 { - return true, splitted[1], "", "" - } - - return true, splitted[2], splitted[1], "" - } - - // vCluster context - if len(splitted) != 4 { - return false, "", "", "" - } - - return true, splitted[3], splitted[2], splitted[1] -} - -func CurrentContext() (string, error) { - config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() - if err != nil { - return "", err - } - - return config.CurrentContext, nil -} - -// DeleteContext deletes the context with the given name from the kube config -func DeleteContext(contextName string) error { - config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() - if err != nil { - return err - } - - delete(config.Contexts, contextName) - delete(config.Clusters, contextName) - delete(config.AuthInfos, contextName) - - if config.CurrentContext == contextName { - config.CurrentContext = "" - for name := range config.Contexts { - config.CurrentContext = name - break - } - } - - // Save the config - return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) -} - -func updateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, setActive bool) error { - config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() - if err != nil { - return err - } - - config.Clusters[contextName] = cluster - config.AuthInfos[contextName] = authInfo - - // Update kube context - context := api.NewContext() - context.Cluster = contextName - context.AuthInfo = contextName - context.Namespace = namespaceName - - config.Contexts[contextName] = context - if setActive { - config.CurrentContext = contextName - } - - // Save the config - return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) -} - -func printKubeConfigTo(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, writer io.Writer) error { - config := api.NewConfig() - - config.Clusters[contextName] = cluster - config.AuthInfos[contextName] = authInfo - - // Update kube context - context := api.NewContext() - context.Cluster = contextName - context.AuthInfo = contextName - context.Namespace = namespaceName - - config.Contexts[contextName] = context - config.CurrentContext = contextName - - // set kind & version - config.APIVersion = "v1" - config.Kind = "Config" - - out, err := clientcmd.Write(*config) - if err != nil { - return err - } - - _, err = writer.Write(out) - return err -} - -// UpdateKubeConfig updates the kube config and adds the virtual cluster context -func UpdateKubeConfig(options ContextOptions) error { - contextName, cluster, authInfo, err := createContext(options) - if err != nil { - return err - } - - // we don't want to set the space name here as the default namespace in the virtual cluster, because it couldn't exist - return updateKubeConfig(contextName, cluster, authInfo, options.CurrentNamespace, options.SetActive) -} - -// PrintKubeConfigTo prints the given config to the writer 
-func PrintKubeConfigTo(options ContextOptions, writer io.Writer) error { - contextName, cluster, authInfo, err := createContext(options) - if err != nil { - return err - } - - // we don't want to set the space name here as the default namespace in the virtual cluster, because it couldn't exist - return printKubeConfigTo(contextName, cluster, authInfo, options.CurrentNamespace, writer) -} - -// PrintTokenKubeConfig writes the kube config to the os.Stdout -func PrintTokenKubeConfig(restConfig *rest.Config, token string) error { - contextName, cluster, authInfo := createTokenContext(restConfig, token) - - return printKubeConfigTo(contextName, cluster, authInfo, "", os.Stdout) -} - -// WriteTokenKubeConfig writes the kube config to the io.Writer -func WriteTokenKubeConfig(restConfig *rest.Config, token string, w io.Writer) error { - contextName, cluster, authInfo := createTokenContext(restConfig, token) - - return printKubeConfigTo(contextName, cluster, authInfo, "", w) -} - -func createTokenContext(restConfig *rest.Config, token string) (string, *api.Cluster, *api.AuthInfo) { - contextName := "default" - - cluster := api.NewCluster() - cluster.Server = restConfig.Host - cluster.InsecureSkipTLSVerify = restConfig.Insecure - cluster.CertificateAuthority = restConfig.CAFile - cluster.CertificateAuthorityData = restConfig.CAData - cluster.TLSServerName = restConfig.ServerName - - authInfo := api.NewAuthInfo() - authInfo.Token = token - - return contextName, cluster, authInfo -} - -func createContext(options ContextOptions) (string, *api.Cluster, *api.AuthInfo, error) { - contextName := options.Name - cluster := api.NewCluster() - cluster.Server = options.Server - cluster.CertificateAuthorityData = options.CaData - cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify - - authInfo := api.NewAuthInfo() - if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil { - authInfo.Token = options.Token - authInfo.ClientKeyData = options.ClientKeyData - authInfo.ClientCertificateData = options.ClientCertificateData - } else { - command, err := os.Executable() - if err != nil { - return "", nil, nil, err - } - - absConfigPath, err := filepath.Abs(options.ConfigPath) - if err != nil { - return "", nil, nil, err - } - - if options.VirtualClusterAccessPointEnabled { - projectName, virtualClusterName := virtualClusterInstanceProjectAndNameFromContextName(contextName) - authInfo.Exec = &api.ExecConfig{ - APIVersion: v1beta1.SchemeGroupVersion.String(), - Command: command, - Args: []string{"token", "--silent", "--project", projectName, "--virtual-cluster", virtualClusterName}, - } - } else { - authInfo.Exec = &api.ExecConfig{ - APIVersion: v1beta1.SchemeGroupVersion.String(), - Command: command, - Args: []string{"token", "--silent", "--config", absConfigPath}, - } - if options.DirectClusterEndpointEnabled { - authInfo.Exec.Args = append(authInfo.Exec.Args, "--direct-cluster-endpoint") - } - } - } - - return contextName, cluster, authInfo, nil -} diff --git a/pkg/platform/loftclient/client.go b/pkg/platform/loftclient/client.go deleted file mode 100644 index d1321d1df..000000000 --- a/pkg/platform/loftclient/client.go +++ /dev/null @@ -1,627 +0,0 @@ -package client - -import ( - "context" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/blang/semver" - "github.com/loft-sh/loftctl/v4/pkg/client/naming" - "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" - - 
"github.com/loft-sh/api/v4/pkg/auth" - "github.com/loft-sh/api/v4/pkg/product" - - managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/loft-sh/loftctl/v4/pkg/constants" - "github.com/loft-sh/loftctl/v4/pkg/kube" - "github.com/loft-sh/loftctl/v4/pkg/upgrade" - "github.com/loft-sh/log" - "github.com/mitchellh/go-homedir" - perrors "github.com/pkg/errors" - "github.com/skratchdot/open-golang/open" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var CacheFolder = ".loft" - -// DefaultCacheConfig is the path to the config -var DefaultCacheConfig = "config.json" - -const ( - VersionPath = "%s/version" - LoginPath = "%s/login?cli=true" - RedirectPath = "%s/spaces" - AccessKeyPath = "%s/profile/access-keys" - RefreshToken = time.Minute * 30 -) - -func init() { - hd, _ := homedir.Dir() - if folder, ok := os.LookupEnv(constants.LoftCacheFolderEnv); ok { - CacheFolder = filepath.Join(hd, folder) - } else { - CacheFolder = filepath.Join(hd, CacheFolder) - } - DefaultCacheConfig = filepath.Join(CacheFolder, DefaultCacheConfig) -} - -type Client interface { - Management() (kube.Interface, error) - ManagementConfig() (*rest.Config, error) - - SpaceInstance(project, name string) (kube.Interface, error) - SpaceInstanceConfig(project, name string) (*rest.Config, error) - - VirtualClusterInstance(project, name string) (kube.Interface, error) - VirtualClusterInstanceConfig(project, name string) (*rest.Config, error) - - Cluster(cluster string) (kube.Interface, error) - ClusterConfig(cluster string) (*rest.Config, error) - - VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error) - VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error) - - Login(host string, insecure bool, log log.Logger) error - LoginWithAccessKey(host, accessKey string, insecure bool) error - LoginRaw(host, accessKey string, insecure bool) error - - Logout(ctx context.Context) error - - Version() (*auth.Version, error) - Config() *Config - DirectClusterEndpointToken(forceRefresh bool) (string, error) - VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error) - Save() error -} - -func NewClient() Client { - return &client{ - config: &Config{}, - } -} - -func NewClientFromPath(path string) (Client, error) { - c := &client{ - configPath: path, - } - - err := c.initConfig() - if err != nil { - return nil, err - } - - return c, nil -} - -type client struct { - config *Config - configPath string - configOnce sync.Once -} - -// Logout implements Client. 
-func (c *client) Logout(ctx context.Context) error { - managementClient, err := c.Management() - if err != nil { - return fmt.Errorf("create management client: %w", err) - } - - self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) - if err != nil { - return fmt.Errorf("get self: %w", err) - } - - if self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin { - err = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(ctx, self.Status.AccessKey, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("delete access key: %w", err) - } - } - - return nil -} - -func (c *client) initConfig() error { - var retErr error - c.configOnce.Do(func() { - // load the config or create new one if not found - content, err := os.ReadFile(c.configPath) - if err != nil { - if os.IsNotExist(err) { - c.config = NewConfig() - return - } - - retErr = err - return - } - - config := &Config{ - VirtualClusterAccessPointCertificates: make(map[string]VirtualClusterCertificatesEntry), - } - err = json.Unmarshal(content, config) - if err != nil { - retErr = err - return - } - - c.config = config - }) - - return retErr -} - -func (c *client) VirtualClusterAccessPointCertificate(project, virtualCluster string, forceRefresh bool) (string, string, error) { - if c.config == nil { - return "", "", perrors.New("no config loaded") - } - - contextName := kubeconfig.VirtualClusterInstanceContextName(project, virtualCluster) - - // see if we have stored cert data for this vci - now := metav1.Now() - cachedVirtualClusterAccessPointCertificate, ok := c.config.VirtualClusterAccessPointCertificates[contextName] - if !forceRefresh && ok && cachedVirtualClusterAccessPointCertificate.LastRequested.Add(RefreshToken).After(now.Time) && cachedVirtualClusterAccessPointCertificate.ExpirationTime.After(now.Time) { - return cachedVirtualClusterAccessPointCertificate.CertificateData, cachedVirtualClusterAccessPointCertificate.KeyData, nil - } - - // refresh token - managementClient, err := c.Management() - if err != nil { - return "", "", err - } - - kubeConfigResponse, err := managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(project)).GetKubeConfig( - context.Background(), - virtualCluster, - &managementv1.VirtualClusterInstanceKubeConfig{ - Spec: managementv1.VirtualClusterInstanceKubeConfigSpec{ - CertificateTTL: ptr.To[int32](86_400), - }, - }, - metav1.CreateOptions{}, - ) - if err != nil { - return "", "", perrors.Wrap(err, "fetch certificate data") - } - - certificateData, keyData, err := getCertificateAndKeyDataFromKubeConfig(kubeConfigResponse.Status.KubeConfig) - if err != nil { - return "", "", err - } - - if c.config.VirtualClusterAccessPointCertificates == nil { - c.config.VirtualClusterAccessPointCertificates = make(map[string]VirtualClusterCertificatesEntry) - } - c.config.VirtualClusterAccessPointCertificates[contextName] = VirtualClusterCertificatesEntry{ - CertificateData: certificateData, - KeyData: keyData, - LastRequested: now, - ExpirationTime: now.Add(86_400 * time.Second), - } - - err = c.Save() - if err != nil { - return "", "", perrors.Wrap(err, "save config") - } - - return certificateData, keyData, nil -} - -func getCertificateAndKeyDataFromKubeConfig(config string) (string, string, error) { - clientCfg, err := clientcmd.NewClientConfigFromBytes([]byte(config)) - if err != nil { - return "", "", err - } - - apiCfg, err := clientCfg.RawConfig() - if err != nil { - 
return "", "", err - } - - return string(apiCfg.AuthInfos["vcluster"].ClientCertificateData), string(apiCfg.AuthInfos["vcluster"].ClientKeyData), nil -} - -func (c *client) DirectClusterEndpointToken(forceRefresh bool) (string, error) { - if c.config == nil { - return "", perrors.New("no config loaded") - } - - // check if we can use existing token - now := metav1.Now() - if !forceRefresh && c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(RefreshToken).After(now.Time) { - return c.config.DirectClusterEndpointToken, nil - } - - // refresh token - managementClient, err := c.Management() - if err != nil { - return "", err - } - - clusterGatewayToken, err := managementClient.Loft().ManagementV1().DirectClusterEndpointTokens().Create(context.Background(), &managementv1.DirectClusterEndpointToken{}, metav1.CreateOptions{}) - if err != nil { - if c.config.DirectClusterEndpointToken != "" && c.config.DirectClusterEndpointTokenRequested != nil && c.config.DirectClusterEndpointTokenRequested.Add(time.Hour*24).After(now.Time) { - return c.config.DirectClusterEndpointToken, nil - } - - return "", err - } else if clusterGatewayToken.Status.Token == "" { - return "", perrors.New("retrieved an empty token") - } - - c.config.DirectClusterEndpointToken = clusterGatewayToken.Status.Token - c.config.DirectClusterEndpointTokenRequested = &now - err = c.Save() - if err != nil { - return "", perrors.Wrap(err, "save config") - } - - return c.config.DirectClusterEndpointToken, nil -} - -func (c *client) Save() error { - if c.configPath == "" { - return nil - } - if c.config == nil { - return perrors.New("no config to write") - } - if c.config.Kind == "" { - c.config.Kind = "Config" - } - if c.config.APIVersion == "" { - c.config.APIVersion = "storage.loft.sh/v1" - } - - err := os.MkdirAll(filepath.Dir(c.configPath), 0o755) - if err != nil { - return err - } - - out, err := json.Marshal(c.config) - if err != nil { - return err - } - - return os.WriteFile(c.configPath, out, 0o660) -} - -func (c *client) ManagementConfig() (*rest.Config, error) { - return c.restConfig("/kubernetes/management") -} - -func (c *client) Management() (kube.Interface, error) { - restConfig, err := c.ManagementConfig() - if err != nil { - return nil, err - } - - return kube.NewForConfig(restConfig) -} - -func (c *client) SpaceInstanceConfig(project, name string) (*rest.Config, error) { - return c.restConfig("/kubernetes/project/" + project + "/space/" + name) -} - -func (c *client) SpaceInstance(project, name string) (kube.Interface, error) { - restConfig, err := c.SpaceInstanceConfig(project, name) - if err != nil { - return nil, err - } - - return kube.NewForConfig(restConfig) -} - -func (c *client) VirtualClusterInstanceConfig(project, name string) (*rest.Config, error) { - return c.restConfig("/kubernetes/project/" + project + "/virtualcluster/" + name) -} - -func (c *client) VirtualClusterInstance(project, name string) (kube.Interface, error) { - restConfig, err := c.VirtualClusterInstanceConfig(project, name) - if err != nil { - return nil, err - } - - return kube.NewForConfig(restConfig) -} - -func (c *client) ClusterConfig(cluster string) (*rest.Config, error) { - return c.restConfig("/kubernetes/cluster/" + cluster) -} - -func (c *client) Cluster(cluster string) (kube.Interface, error) { - restConfig, err := c.ClusterConfig(cluster) - if err != nil { - return nil, err - } - - return kube.NewForConfig(restConfig) -} - -func (c 
*client) VirtualClusterConfig(cluster, namespace, virtualCluster string) (*rest.Config, error) { - return c.restConfig("/kubernetes/virtualcluster/" + cluster + "/" + namespace + "/" + virtualCluster) -} - -func (c *client) VirtualCluster(cluster, namespace, virtualCluster string) (kube.Interface, error) { - restConfig, err := c.VirtualClusterConfig(cluster, namespace, virtualCluster) - if err != nil { - return nil, err - } - - return kube.NewForConfig(restConfig) -} - -func (c *client) Config() *Config { - return c.config -} - -type keyStruct struct { - Key string -} - -func verifyHost(host string) error { - if !strings.HasPrefix(host, "https") { - return fmt.Errorf("cannot log into a non https loft instance '%s', please make sure you have TLS enabled", host) - } - - return nil -} - -func (c *client) Version() (*auth.Version, error) { - restConfig, err := c.restConfig("") - if err != nil { - return nil, err - } - - restClient, err := kube.NewForConfig(restConfig) - if err != nil { - return nil, err - } - - raw, err := restClient.CoreV1().RESTClient().Get().RequestURI("/version").DoRaw(context.Background()) - if err != nil { - return nil, perrors.New(fmt.Sprintf("%s\n\nYou may need to login again via `%s login %s --insecure` to allow self-signed certificates\n", err.Error(), os.Args[0], restConfig.Host)) - } - - version := &auth.Version{} - err = json.Unmarshal(raw, version) - if err != nil { - return nil, perrors.Wrap(err, "parse version response") - } - - return version, nil -} - -func (c *client) Login(host string, insecure bool, log log.Logger) error { - var ( - loginURL = fmt.Sprintf(LoginPath, host) - key keyStruct - keyChannel = make(chan keyStruct) - ) - - err := verifyHost(host) - if err != nil { - return err - } - - server := startServer(fmt.Sprintf(RedirectPath, host), keyChannel, log) - err = open.Run(fmt.Sprintf(LoginPath, host)) - if err != nil { - return fmt.Errorf("couldn't open the login page in a browser: %w. Please use the --access-key flag for the login command. 
You can generate an access key here: %s", err, fmt.Sprintf(AccessKeyPath, host)) - } - log.Infof("If the browser does not open automatically, please navigate to %s", loginURL) - msg := "If you have problems logging in, please navigate to %s/profile/access-keys, click on 'Create Access Key' and then login via '%s %s --access-key ACCESS_KEY" - if insecure { - msg += " --insecure" - } - msg += "'" - log.Infof(msg, host, product.LoginCmd(), host) - log.Infof("Logging into %s...", product.DisplayName()) - - key = <-keyChannel - - go func() { - err = server.Shutdown(context.Background()) - if err != nil { - log.Debugf("Error shutting down server: %v", err) - } - }() - - close(keyChannel) - return c.LoginWithAccessKey(host, key.Key, insecure) -} - -func (c *client) LoginRaw(host, accessKey string, insecure bool) error { - if c.config.Host == host && c.config.AccessKey == accessKey { - return nil - } - - c.config.Host = host - c.config.Insecure = insecure - c.config.AccessKey = accessKey - c.config.DirectClusterEndpointToken = "" - c.config.DirectClusterEndpointTokenRequested = nil - return c.Save() -} - -func (c *client) LoginWithAccessKey(host, accessKey string, insecure bool) error { - err := verifyHost(host) - if err != nil { - return err - } - if c.config.Host == host && c.config.AccessKey == accessKey { - return nil - } - - // delete old access key if were logged in before - if c.config.AccessKey != "" { - managementClient, err := c.Management() - if err == nil { - self, err := managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{}) - if err == nil && self.Status.AccessKey != "" && self.Status.AccessKeyType == storagev1.AccessKeyTypeLogin { - _ = managementClient.Loft().ManagementV1().OwnedAccessKeys().Delete(context.TODO(), self.Status.AccessKey, metav1.DeleteOptions{}) - } - } - } - - c.config.Host = host - c.config.Insecure = insecure - c.config.AccessKey = accessKey - c.config.DirectClusterEndpointToken = "" - c.config.DirectClusterEndpointTokenRequested = nil - - // verify version - err = VerifyVersion(c) - if err != nil { - return err - } - - // verify the connection works - managementClient, err := c.Management() - if err != nil { - return perrors.Wrap(err, "create management client") - } - - // try to get self - _, err = managementClient.Loft().ManagementV1().Selves().Create(context.TODO(), &managementv1.Self{}, metav1.CreateOptions{}) - if err != nil { - var urlError *url.Error - if errors.As(err, &urlError) { - var err x509.UnknownAuthorityError - if errors.As(urlError.Err, &err) { - return fmt.Errorf("unsafe login endpoint '%s', if you wish to login into an insecure loft endpoint run with the '--insecure' flag", c.config.Host) - } - } - - return perrors.Errorf("error logging in: %v", err) - } - - return c.Save() -} - -// VerifyVersion checks if the Loft version is compatible with this CLI version -func VerifyVersion(baseClient Client) error { - v, err := baseClient.Version() - if err != nil { - return err - } else if v.Version == "v0.0.0" { - return nil - } - - backendMajor, err := strconv.Atoi(v.Major) - if err != nil { - return perrors.Wrap(err, "parse major version string") - } - - cliVersionStr := upgrade.GetVersion() - if cliVersionStr == "" { - return nil - } - - cliVersion, err := semver.Parse(cliVersionStr) - if err != nil { - return err - } - - if int(cliVersion.Major) > backendMajor { - return fmt.Errorf("unsupported %[1]s version %[2]s. 
Please downgrade your CLI to below v%[3]d.0.0 to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, cliVersion.Major, backendMajor) - } else if int(cliVersion.Major) < backendMajor { - return fmt.Errorf("unsupported %[1]s version %[2]s. Please upgrade your CLI to v%[3]d.0.0 or above to support this version, as %[1]s v%[3]d.0.0 and newer versions are incompatible with v%[4]d.x.x", product.DisplayName(), v.Version, backendMajor, cliVersion.Major) - } - - return nil -} - -func (c *client) restConfig(hostSuffix string) (*rest.Config, error) { - if c.config == nil { - return nil, perrors.New("no config loaded") - } else if c.config.Host == "" || c.config.AccessKey == "" { - return nil, perrors.New(fmt.Sprintf("not logged in, please make sure you have run '%s [%s]'", product.LoginCmd(), product.Url())) - } - - // build a rest config - config, err := GetRestConfig(c.config.Host+hostSuffix, c.config.AccessKey, c.config.Insecure) - if err != nil { - return nil, err - } - - return config, err -} - -func GetKubeConfig(host, token, namespace string, insecure bool) clientcmd.ClientConfig { - contextName := "local" - kubeConfig := clientcmdapi.NewConfig() - kubeConfig.Contexts = map[string]*clientcmdapi.Context{ - contextName: { - Cluster: contextName, - AuthInfo: contextName, - Namespace: namespace, - }, - } - kubeConfig.Clusters = map[string]*clientcmdapi.Cluster{ - contextName: { - Server: host, - InsecureSkipTLSVerify: insecure, - }, - } - kubeConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ - contextName: { - Token: token, - }, - } - kubeConfig.CurrentContext = contextName - return clientcmd.NewDefaultClientConfig(*kubeConfig, &clientcmd.ConfigOverrides{}) -} - -func GetRestConfig(host, token string, insecure bool) (*rest.Config, error) { - config, err := GetKubeConfig(host, token, "", insecure).ClientConfig() - if err != nil { - return nil, err - } - config.UserAgent = constants.LoftctlUserAgentPrefix + upgrade.GetVersion() - - return config, nil -} - -func startServer(redirectURI string, keyChannel chan keyStruct, log log.Logger) *http.Server { - srv := &http.Server{Addr: ":25843"} - - http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) { - keys, ok := r.URL.Query()["key"] - if !ok || len(keys[0]) == 0 { - log.Warn("Login: the key used to login is not valid") - return - } - - keyChannel <- keyStruct{ - Key: keys[0], - } - http.Redirect(w, r, redirectURI, http.StatusSeeOther) - }) - - go func() { - // cannot panic, because this probably is an intentional close - _ = srv.ListenAndServe() - }() - - // returning reference so caller can call Shutdown() - return srv -} diff --git a/pkg/platform/loftclient/config.go b/pkg/platform/loftclient/config.go deleted file mode 100644 index 0ebc14f37..000000000 --- a/pkg/platform/loftclient/config.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Config defines the client config structure -type Config struct { - metav1.TypeMeta `json:",inline"` - - // host is the http endpoint of how to access loft - // +optional - Host string `json:"host,omitempty"` - - // LastInstallContext is the last install context - // +optional - LastInstallContext string `json:"lastInstallContext,omitempty"` - - // insecure specifies if the loft instance is insecure - // +optional - Insecure bool `json:"insecure,omitempty"` - - // access key is the access key for the given loft host - // +optional - 
AccessKey string `json:"accesskey,omitempty"` - - // virtual cluster access key is the access key for the given loft host to create virtual clusters - // +optional - VirtualClusterAccessKey string `json:"virtualClusterAccessKey,omitempty"` - - // DEPRECATED: do not use anymore - // the direct cluster endpoint token - // +optional - DirectClusterEndpointToken string `json:"directClusterEndpointToken,omitempty"` - - // DEPRECATED: do not use anymore - // last time the direct cluster endpoint token was requested - // +optional - DirectClusterEndpointTokenRequested *metav1.Time `json:"directClusterEndpointTokenRequested,omitempty"` - - // map of cached certificates for "access point" mode virtual clusters - // +optional - VirtualClusterAccessPointCertificates map[string]VirtualClusterCertificatesEntry -} - -type VirtualClusterCertificatesEntry struct { - CertificateData string - KeyData string - LastRequested metav1.Time - ExpirationTime time.Time -} - -// NewConfig creates a new config -func NewConfig() *Config { - return &Config{ - TypeMeta: metav1.TypeMeta{ - Kind: "Config", - APIVersion: "storage.loft.sh/v1", - }, - } -} diff --git a/pkg/platform/loftclient/helper/helper.go b/pkg/platform/loftclient/helper/helper.go deleted file mode 100644 index ba5c9c46c..000000000 --- a/pkg/platform/loftclient/helper/helper.go +++ /dev/null @@ -1,1160 +0,0 @@ -package helper - -import ( - "context" - "errors" - "fmt" - "os" - "sort" - "strings" - - "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" - "github.com/loft-sh/vcluster/pkg/platform/loftclient/naming" - authorizationv1 "k8s.io/api/authorization/v1" - - clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" - managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - "github.com/loft-sh/log" - "github.com/loft-sh/log/survey" - "github.com/loft-sh/vcluster/pkg/platform/clihelper" - "github.com/loft-sh/vcluster/pkg/platform/kube" - "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" - client "github.com/loft-sh/vcluster/pkg/platform/loftclient" - "github.com/mgutz/ansi" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubectl/pkg/util/term" -) - -var errNoClusterAccess = errors.New("the user has no access to any cluster") - -type VirtualClusterInstanceProject struct { - VirtualCluster *managementv1.VirtualClusterInstance - Project *managementv1.Project -} - -type SpaceInstanceProject struct { - SpaceInstance *managementv1.SpaceInstance - Project *managementv1.Project -} - -func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - // select default template - if templateName == "" && projectTemplates.DefaultVirtualClusterTemplate != "" { - templateName = projectTemplates.DefaultVirtualClusterTemplate - } - - // try to find template - if templateName != "" { - for _, virtualClusterTemplate := range projectTemplates.VirtualClusterTemplates { - if virtualClusterTemplate.Name == templateName { - return &virtualClusterTemplate, nil - } - } - - return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) - } else if 
len(projectTemplates.VirtualClusterTemplates) == 0 { - return nil, fmt.Errorf("there are no allowed virtual cluster templates in project %s", projectName) - } else if len(projectTemplates.VirtualClusterTemplates) == 1 { - return &projectTemplates.VirtualClusterTemplates[0], nil - } - - templateNames := []string{} - for _, template := range projectTemplates.VirtualClusterTemplates { - templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) - } - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a template to use", - DefaultValue: templateNames[0], - Options: templateNames, - }) - if err != nil { - return nil, err - } - for _, template := range projectTemplates.VirtualClusterTemplates { - if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { - return &template, nil - } - } - - return nil, fmt.Errorf("answer not found") -} - -func SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - // select default template - if templateName == "" && projectTemplates.DefaultSpaceTemplate != "" { - templateName = projectTemplates.DefaultSpaceTemplate - } - - // try to find template - if templateName != "" { - for _, spaceTemplate := range projectTemplates.SpaceTemplates { - if spaceTemplate.Name == templateName { - return &spaceTemplate, nil - } - } - - return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) - } else if len(projectTemplates.SpaceTemplates) == 0 { - return nil, fmt.Errorf("there are no allowed space templates in project %s", projectName) - } else if len(projectTemplates.SpaceTemplates) == 1 { - return &projectTemplates.SpaceTemplates[0], nil - } - - templateNames := []string{} - for _, template := range projectTemplates.SpaceTemplates { - templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) - } - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a template to use", - DefaultValue: templateNames[0], - Options: templateNames, - }) - if err != nil { - return nil, err - } - for _, template := range projectTemplates.SpaceTemplates { - if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { - return &template, nil - } - } - - return nil, fmt.Errorf("answer not found") -} - -func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { - if clusterName != "" || spaceName != "" { - virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) - return cluster, "", space, virtualCluster, err - } - - managementClient, err := baseClient.Management() - if err != nil { - return "", "", "", "", err - } - - // gather projects and virtual cluster instances to access - var projects []*managementv1.Project - if projectName != "" { - project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) 
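The Projects().Get call above opens a get-one-or-list-all pattern that recurs throughout these helpers; its error handling continues below. A condensed, hypothetical sketch of the pattern (resolveProjects is not part of this patch), using only API calls that appear in the surrounding code:

    // resolveProjects fetches one named project, or falls back to listing
    // every project the caller can see. Hypothetical helper; imports mirror
    // those of the surrounding file.
    func resolveProjects(ctx context.Context, mgmt kube.Interface, projectName string) ([]*managementv1.Project, error) {
        if projectName != "" {
            project, err := mgmt.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{})
            if err != nil {
                return nil, err
            }
            return []*managementv1.Project{project}, nil
        }
        list, err := mgmt.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        projects := make([]*managementv1.Project, 0, len(list.Items))
        for i := range list.Items {
            projects = append(projects, &list.Items[i])
        }
        return projects, nil
    }
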
- if err != nil { - if kerrors.IsNotFound(err) { - return "", "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) - } - - return "", "", "", "", err - } - - projects = append(projects, project) - } else { - projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil || len(projectsList.Items) == 0 { - virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) - return cluster, "", space, virtualCluster, err - } - - for _, p := range projectsList.Items { - proj := p - projects = append(projects, &proj) - } - } - - // gather space instances in those projects - var virtualClusters []*VirtualClusterInstanceProject - for _, p := range projects { - if virtualClusterName != "" { - virtualClusterInstance, err := getProjectVirtualClusterInstance(ctx, managementClient, p, virtualClusterName) - if err != nil { - continue - } - - virtualClusters = append(virtualClusters, virtualClusterInstance) - } else { - projectVirtualClusters, err := getProjectVirtualClusterInstances(ctx, managementClient, p) - if err != nil { - continue - } - - virtualClusters = append(virtualClusters, projectVirtualClusters...) - } - } - - // get unformatted options - var optionsUnformatted [][]string - for _, virtualCluster := range virtualClusters { - optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) - } - - // check if there are virtualclusters - if len(virtualClusters) == 0 { - if virtualClusterName != "" { - return "", "", "", "", fmt.Errorf("couldn't find or access virtual cluster %s", virtualClusterName) - } - return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") - } else if len(virtualClusters) == 1 { - return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil - } - - questionOptions := formatOptions("%s | %s", optionsUnformatted) - selectedOption, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a virtual cluster", - DefaultValue: questionOptions[0], - Options: questionOptions, - }) - if err != nil { - return "", "", "", "", err - } - - for idx, s := range questionOptions { - if s == selectedOption { - return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil - } - } - - return "", "", "", "", fmt.Errorf("couldn't find answer") -} - -func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { - if clusterName != "" { - space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) - return cluster, "", space, err - } - - managementClient, err := baseClient.Management() - if err != nil { - return "", "", "", err - } - - // gather projects and space instances to access - var projects []*managementv1.Project - if projectName != "" { - project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) - if err != nil { - if kerrors.IsNotFound(err) { - return "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) - } - - return "", "", "", err - } - - projects = append(projects, project) - } 
else { - projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil || len(projectsList.Items) == 0 { - space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) - return cluster, "", space, err - } - - for _, p := range projectsList.Items { - proj := p - projects = append(projects, &proj) - } - } - - // gather space instances in those projects - var spaces []*SpaceInstanceProject - for _, p := range projects { - if spaceName != "" { - spaceInstance, err := getProjectSpaceInstance(ctx, managementClient, p, spaceName) - if err != nil { - continue - } - - spaces = append(spaces, spaceInstance) - } else { - projectSpaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) - if err != nil { - continue - } - - spaces = append(spaces, projectSpaceInstances...) - } - } - - // get unformatted options - var optionsUnformatted [][]string - for _, space := range spaces { - optionsUnformatted = append(optionsUnformatted, []string{"Space: " + clihelper.GetDisplayName(space.SpaceInstance.Name, space.SpaceInstance.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(space.Project.Name, space.Project.Spec.DisplayName)}) - } - - // check if there are spaces - if len(spaces) == 0 { - if spaceName != "" { - return "", "", "", fmt.Errorf("couldn't find or access space %s", spaceName) - } - return "", "", "", fmt.Errorf("couldn't find a space you have access to") - } else if len(spaces) == 1 { - return spaces[0].SpaceInstance.Spec.ClusterRef.Cluster, spaces[0].Project.Name, spaces[0].SpaceInstance.Name, nil - } - - questionOptions := formatOptions("%s | %s", optionsUnformatted) - selectedOption, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a space", - DefaultValue: questionOptions[0], - Options: questionOptions, - }) - if err != nil { - return "", "", "", err - } - - for idx, s := range questionOptions { - if s == selectedOption { - return spaces[idx].SpaceInstance.Spec.ClusterRef.Cluster, spaces[idx].Project.Name, spaces[idx].SpaceInstance.Name, nil - } - } - - return "", "", "", fmt.Errorf("couldn't find answer") -} - -func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { - if projectName != "" { - return clusterName, projectName, nil - } else if allowClusterOnly && clusterName != "" { - return clusterName, "", nil - } - - managementClient, err := baseClient.Management() - if err != nil { - return "", "", err - } - - projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil { - return "", "", err - } - - projectNames := []string{} - for _, project := range projectList.Items { - projectNames = append(projectNames, clihelper.GetDisplayName(project.Name, project.Spec.DisplayName)) - } - - if len(projectNames) == 0 { - cluster, err := SelectCluster(ctx, baseClient, log) - if err != nil { - if errors.Is(err, errNoClusterAccess) { - return "", "", fmt.Errorf("the user has no access to a project") - } - - return "", "", err - } - - return cluster, "", nil - } - - var selectedProject *managementv1.Project - if len(projectNames) == 1 { - selectedProject = &projectList.Items[0] - } else { - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a project to use", - DefaultValue: projectNames[0], - Options: projectNames, - }) - if err != nil { - return "", 
"", err - } - for idx, project := range projectList.Items { - if answer == clihelper.GetDisplayName(project.Name, project.Spec.DisplayName) { - selectedProject = &projectList.Items[idx] - } - } - if selectedProject == nil { - return "", "", fmt.Errorf("answer not found") - } - } - - if clusterName == "" { - clusterName, err = SelectProjectCluster(ctx, baseClient, selectedProject, log) - return clusterName, selectedProject.Name, err - } - - return clusterName, selectedProject.Name, nil -} - -// SelectCluster lets the user select a cluster -func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger) (string, error) { - managementClient, err := baseClient.Management() - if err != nil { - return "", err - } - - clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) - if err != nil { - return "", err - } - - clusterNames := []string{} - for _, cluster := range clusterList.Items { - clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) - } - - if len(clusterList.Items) == 0 { - return "", errNoClusterAccess - } else if len(clusterList.Items) == 1 { - return clusterList.Items[0].Name, nil - } - - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a cluster to use", - DefaultValue: clusterNames[0], - Options: clusterNames, - }) - if err != nil { - return "", err - } - for _, cluster := range clusterList.Items { - if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { - return cluster.Name, nil - } - } - return "", fmt.Errorf("answer not found") -} - -// SelectProjectCluster lets the user select a cluster from the project's allowed clusters -func SelectProjectCluster(ctx context.Context, baseClient client.Client, project *managementv1.Project, log log.Logger) (string, error) { - if !term.IsTerminal(os.Stdin) { - // Allow loft to schedule as before - return "", nil - } - - managementClient, err := baseClient.Management() - if err != nil { - return "", err - } - - clusterList, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, project.Name, metav1.GetOptions{}) - if err != nil { - return "", err - } - - anyClusterOption := "Any Cluster [Loft Selects Cluster]" - clusterNames := []string{} - for _, allowedCluster := range project.Spec.AllowedClusters { - if allowedCluster.Name == "*" { - clusterNames = append(clusterNames, anyClusterOption) - break - } - } - - for _, cluster := range clusterList.Clusters { - clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) - } - - if len(clusterList.Clusters) == 0 { - return "", errNoClusterAccess - } else if len(clusterList.Clusters) == 1 { - return clusterList.Clusters[0].Name, nil - } - - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a cluster to use", - DefaultValue: clusterNames[0], - Options: clusterNames, - }) - if err != nil { - return "", err - } - - if answer == anyClusterOption { - return "", nil - } - - for _, cluster := range clusterList.Clusters { - if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { - return cluster.Name, nil - } - } - return "", fmt.Errorf("answer not found") -} - -// SelectUserOrTeam lets the user select an user or team in a cluster -func SelectUserOrTeam(ctx context.Context, baseClient client.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { - managementClient, err := 
baseClient.Management() - if err != nil { - return nil, nil, err - } - - clusterAccess, err := managementClient.Loft().ManagementV1().Clusters().ListAccess(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - return nil, nil, err - } - - var user *clusterv1.EntityInfo - if len(clusterAccess.Users) > 0 { - user = &clusterAccess.Users[0].Info - } - - teams := []*clusterv1.EntityInfo{} - for _, team := range clusterAccess.Teams { - t := team - teams = append(teams, &t.Info) - } - - if user == nil && len(teams) == 0 { - return nil, nil, fmt.Errorf("the user has no access to cluster %s", clusterName) - } else if user != nil && len(teams) == 0 { - return user, nil, nil - } else if user == nil && len(teams) == 1 { - return nil, teams[0], nil - } - - names := []string{} - if user != nil { - names = append(names, "User "+clihelper.DisplayName(user)) - } - for _, t := range teams { - names = append(names, "Team "+clihelper.DisplayName(t)) - } - - answer, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a user or team to use", - DefaultValue: names[0], - Options: names, - }) - if err != nil { - return nil, nil, err - } - - if user != nil && "User "+clihelper.DisplayName(user) == answer { - return user, nil, nil - } - for _, t := range teams { - if "Team "+clihelper.DisplayName(t) == answer { - return nil, t, nil - } - } - - return nil, nil, fmt.Errorf("answer not found") -} - -type ClusterUserOrTeam struct { - Team bool - ClusterMember managementv1.ClusterMember -} - -func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { - if userName != "" && teamName != "" { - return nil, fmt.Errorf("team and user specified, please only choose one") - } - - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - members, err := managementClient.Loft().ManagementV1().Clusters().ListMembers(ctx, clusterName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("retrieve cluster members: %w", err) - } - - matchedMembers := []ClusterUserOrTeam{} - optionsUnformatted := [][]string{} - for _, user := range members.Users { - if teamName != "" { - continue - } else if userName != "" && user.Info.Name != userName { - continue - } - - matchedMembers = append(matchedMembers, ClusterUserOrTeam{ - ClusterMember: user, - }) - displayName := user.Info.DisplayName - if displayName == "" { - displayName = user.Info.Name - } - - optionsUnformatted = append(optionsUnformatted, []string{"User: " + displayName, "Kube User: " + user.Info.Name}) - } - for _, team := range members.Teams { - if userName != "" { - continue - } else if teamName != "" && team.Info.Name != teamName { - continue - } - - matchedMembers = append(matchedMembers, ClusterUserOrTeam{ - Team: true, - ClusterMember: team, - }) - displayName := team.Info.DisplayName - if displayName == "" { - displayName = team.Info.Name - } - - optionsUnformatted = append(optionsUnformatted, []string{"Team: " + displayName, "Kube Team: " + team.Info.Name}) - } - - questionOptions := formatOptions("%s | %s", optionsUnformatted) - if len(questionOptions) == 0 { - if userName == "" && teamName == "" { - return nil, fmt.Errorf("couldn't find any space") - } else if userName != "" { - return nil, fmt.Errorf("couldn't find user %s in cluster %s", ansi.Color(userName, "white+b"), ansi.Color(clusterName, "white+b")) - } - - return nil, fmt.Errorf("couldn't find team %s in cluster %s", ansi.Color(teamName, 
"white+b"), ansi.Color(clusterName, "white+b")) - } else if len(questionOptions) == 1 { - return &matchedMembers[0], nil - } - - selectedMember, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a user or team", - DefaultValue: questionOptions[0], - Options: questionOptions, - }) - if err != nil { - return nil, err - } - - for idx, s := range questionOptions { - if s == selectedMember { - return &matchedMembers[idx], nil - } - } - - return nil, fmt.Errorf("selected question option not found") -} - -func GetVirtualClusterInstances(ctx context.Context, baseClient client.Client) ([]*VirtualClusterInstanceProject, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - var retVClusters []*VirtualClusterInstanceProject - for _, project := range projectList.Items { - p := &project - - virtualClusterInstances, err := getProjectVirtualClusterInstances(ctx, managementClient, p) - if err != nil { - return nil, err - } - - retVClusters = append(retVClusters, virtualClusterInstances...) - } - - return retVClusters, nil -} - -func CanAccessProjectSecret(ctx context.Context, managementClient kube.Interface, namespace, name string) (bool, error) { - return CanAccessInstance(ctx, managementClient, namespace, name, "projectsecrets") -} - -func CanAccessInstance(ctx context.Context, managementClient kube.Interface, namespace, name string, resource string) (bool, error) { - selfSubjectAccessReview, err := managementClient.Loft().ManagementV1().SelfSubjectAccessReviews().Create(ctx, &managementv1.SelfSubjectAccessReview{ - Spec: managementv1.SelfSubjectAccessReviewSpec{ - SelfSubjectAccessReviewSpec: authorizationv1.SelfSubjectAccessReviewSpec{ - ResourceAttributes: &authorizationv1.ResourceAttributes{ - Verb: "use", - Group: managementv1.SchemeGroupVersion.Group, - Version: managementv1.SchemeGroupVersion.Version, - Resource: resource, - Namespace: namespace, - Name: name, - }, - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return false, err - } else if !selfSubjectAccessReview.Status.Allowed || selfSubjectAccessReview.Status.Denied { - return false, nil - } - return true, nil -} - -func GetSpaceInstances(ctx context.Context, baseClient client.Client) ([]*SpaceInstanceProject, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - var retSpaces []*SpaceInstanceProject - for _, project := range projectList.Items { - p := &project - - spaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) - if err != nil { - return nil, err - } - - retSpaces = append(retSpaces, spaceInstances...) 
- } - - return retSpaces, nil -} - -type ProjectProjectSecret struct { - ProjectSecret managementv1.ProjectSecret - Project string -} - -func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { - var projects []*managementv1.Project - if len(projectNames) == 0 { - projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - for idx := range projectList.Items { - projectItem := projectList.Items[idx] - projects = append(projects, &projectItem) - } - } else { - for _, projectName := range projectNames { - project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - projects = append(projects, project) - } - } - - var retSecrets []*ProjectProjectSecret - for _, project := range projects { - projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(naming.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - for _, projectSecret := range projectSecrets.Items { - canAccess, err := CanAccessProjectSecret(ctx, managementClient, projectSecret.Namespace, projectSecret.Name) - if err != nil { - return nil, err - } else if !canAccess { - continue - } - - retSecrets = append(retSecrets, &ProjectProjectSecret{ - ProjectSecret: projectSecret, - Project: project.Name, - }) - } - } - - return retSecrets, nil -} - -type ClusterSpace struct { - clusterv1.Space - Cluster string -} - -// GetSpaces returns all spaces accessible by the user or team -func GetSpaces(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterSpace, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - spaceList := []ClusterSpace{} - for _, cluster := range clusterList.Items { - clusterClient, err := baseClient.Cluster(cluster.Name) - if err != nil { - return nil, err - } - - spaces, err := clusterClient.Agent().ClusterV1().Spaces().List(ctx, metav1.ListOptions{}) - if err != nil { - if kerrors.IsForbidden(err) { - continue - } - - log.Warnf("Error retrieving spaces from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) - continue - } - - for _, space := range spaces.Items { - spaceList = append(spaceList, ClusterSpace{ - Space: space, - Cluster: cluster.Name, - }) - } - } - sort.Slice(spaceList, func(i, j int) bool { - return spaceList[i].Name < spaceList[j].Name - }) - - return spaceList, nil -} - -type ClusterVirtualCluster struct { - clusterv1.VirtualCluster - Cluster string -} - -// GetVirtualClusters returns all virtual clusters the user has access to -func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterVirtualCluster, error) { - managementClient, err := baseClient.Management() - if err != nil { - return nil, err - } - - clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) - if err != nil { - return nil, err - } - - virtualClusterList := []ClusterVirtualCluster{} - for _, cluster := range clusterList.Items { - clusterClient, err := baseClient.Cluster(cluster.Name) - if err != nil { - return nil, err - } - - virtualClusters, err := 
clusterClient.Agent().ClusterV1().VirtualClusters("").List(ctx, metav1.ListOptions{}) - if err != nil { - if kerrors.IsForbidden(err) { - continue - } - - log.Warnf("Error retrieving virtual clusters from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) - continue - } - - for _, virtualCluster := range virtualClusters.Items { - virtualClusterList = append(virtualClusterList, ClusterVirtualCluster{ - VirtualCluster: virtualCluster, - Cluster: cluster.Name, - }) - } - } - sort.Slice(virtualClusterList, func(i, j int) bool { - return virtualClusterList[i].Name < virtualClusterList[j].Name - }) - - return virtualClusterList, nil -} - -// SelectSpaceAndClusterName selects a space and cluster name -func SelectSpaceAndClusterName(ctx context.Context, baseClient client.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { - spaces, err := GetSpaces(ctx, baseClient, log) - if err != nil { - return "", "", err - } - - currentContext, err := kubeconfig.CurrentContext() - if err != nil { - return "", "", fmt.Errorf("loading kubernetes config: %w", err) - } - - isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) - matchedSpaces := []ClusterSpace{} - questionOptionsUnformatted := [][]string{} - defaultIndex := 0 - for _, space := range spaces { - if spaceName != "" && space.Space.Name != spaceName { - continue - } else if clusterName != "" && space.Cluster != clusterName { - continue - } else if len(matchedSpaces) > 20 { - break - } - - if isLoftContext && vCluster == "" && cluster == space.Cluster && namespace == space.Space.Name { - defaultIndex = len(questionOptionsUnformatted) - } - - matchedSpaces = append(matchedSpaces, space) - spaceName := space.Space.Name - if space.Space.Annotations != nil && space.Space.Annotations["loft.sh/display-name"] != "" { - spaceName = space.Space.Annotations["loft.sh/display-name"] + " (" + spaceName + ")" - } - - questionOptionsUnformatted = append(questionOptionsUnformatted, []string{spaceName, space.Cluster}) - } - - questionOptions := formatOptions("Space: %s | Cluster: %s", questionOptionsUnformatted) - if len(questionOptions) == 0 { - if spaceName == "" { - return "", "", fmt.Errorf("couldn't find any space") - } else if clusterName != "" { - return "", "", fmt.Errorf("couldn't find space %s in cluster %s", ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) - } - - return "", "", fmt.Errorf("couldn't find space %s", ansi.Color(spaceName, "white+b")) - } else if len(questionOptions) == 1 { - return matchedSpaces[0].Space.Name, matchedSpaces[0].Cluster, nil - } - - selectedSpace, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a space", - DefaultValue: questionOptions[defaultIndex], - Options: questionOptions, - }) - if err != nil { - return "", "", err - } - - for idx, s := range questionOptions { - if s == selectedSpace { - clusterName = matchedSpaces[idx].Cluster - spaceName = matchedSpaces[idx].Space.Name - break - } - } - - return spaceName, clusterName, nil -} - -func GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*managementv1.UserInfo, *clusterv1.EntityInfo, error) { - self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) - if err != nil { - return nil, nil, fmt.Errorf("get self: %w", err) - } else if self.Status.User == nil && self.Status.Team == nil { - return nil, nil, fmt.Errorf("no user or team name returned") - } - - 
return self.Status.User, self.Status.Team, nil -} - -func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { - virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) - if err != nil { - return "", "", "", err - } - - currentContext, err := kubeconfig.CurrentContext() - if err != nil { - return "", "", "", fmt.Errorf("loading kubernetes config: %w", err) - } - - isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) - matchedVClusters := []ClusterVirtualCluster{} - questionOptionsUnformatted := [][]string{} - defaultIndex := 0 - for _, virtualCluster := range virtualClusters { - if virtualClusterName != "" && virtualCluster.VirtualCluster.Name != virtualClusterName { - continue - } else if spaceName != "" && virtualCluster.VirtualCluster.Namespace != spaceName { - continue - } else if clusterName != "" && virtualCluster.Cluster != clusterName { - continue - } - - if isLoftContext && vCluster == virtualCluster.VirtualCluster.Name && cluster == virtualCluster.Cluster && namespace == virtualCluster.VirtualCluster.Namespace { - defaultIndex = len(questionOptionsUnformatted) - } - - matchedVClusters = append(matchedVClusters, virtualCluster) - vClusterName := virtualCluster.VirtualCluster.Name - if virtualCluster.VirtualCluster.Annotations != nil && virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] != "" { - vClusterName = virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] + " (" + vClusterName + ")" - } - - questionOptionsUnformatted = append(questionOptionsUnformatted, []string{vClusterName, virtualCluster.VirtualCluster.Namespace, virtualCluster.Cluster}) - } - - questionOptions := formatOptions("vCluster: %s | Space: %s | Cluster: %s", questionOptionsUnformatted) - if len(questionOptions) == 0 { - if virtualClusterName == "" { - return "", "", "", fmt.Errorf("couldn't find any virtual cluster") - } else if spaceName != "" { - return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) - } else if clusterName != "" { - return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) - } - - return "", "", "", fmt.Errorf("couldn't find virtual cluster %s", ansi.Color(virtualClusterName, "white+b")) - } else if len(questionOptions) == 1 { - return matchedVClusters[0].VirtualCluster.Name, matchedVClusters[0].VirtualCluster.Namespace, matchedVClusters[0].Cluster, nil - } - - selectedSpace, err := log.Question(&survey.QuestionOptions{ - Question: "Please choose a virtual cluster to use", - DefaultValue: questionOptions[defaultIndex], - Options: questionOptions, - }) - if err != nil { - return "", "", "", err - } - - for idx, s := range questionOptions { - if s == selectedSpace { - clusterName = matchedVClusters[idx].Cluster - virtualClusterName = matchedVClusters[idx].VirtualCluster.Name - spaceName = matchedVClusters[idx].VirtualCluster.Namespace - break - } - } - - return virtualClusterName, spaceName, clusterName, nil -} - -func formatOptions(format string, options [][]string) []string { - if len(options) == 0 { - return []string{} - } - - columnLengths := make([]int, len(options[0])) - for _, row := range 
options { - for i, column := range row { - if len(column) > columnLengths[i] { - columnLengths[i] = len(column) - } - } - } - - retOptions := []string{} - for _, row := range options { - columns := []interface{}{} - for i := range row { - value := row[i] - if columnLengths[i] > len(value) { - value = value + strings.Repeat(" ", columnLengths[i]-len(value)) - } - - columns = append(columns, value) - } - - retOptions = append(retOptions, fmt.Sprintf(format, columns...)) - } - - return retOptions -} - -func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, spaceName string) (*SpaceInstanceProject, error) { - spaceInstance := &managementv1.SpaceInstance{} - err := managementClient.Loft().ManagementV1().RESTClient(). - Get(). - Resource("spaceinstances"). - Namespace(naming.ProjectNamespace(project.Name)). - Name(spaceName). - VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). - Param("extended", "true"). - Do(ctx). - Into(spaceInstance) - if err != nil { - return nil, err - } - - if !spaceInstance.Status.CanUse { - return nil, fmt.Errorf("no use access") - } - - return &SpaceInstanceProject{ - SpaceInstance: spaceInstance, - Project: project, - }, nil -} - -func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*SpaceInstanceProject, error) { - spaceInstanceList := &managementv1.SpaceInstanceList{} - err := managementClient.Loft().ManagementV1().RESTClient(). - Get(). - Resource("spaceinstances"). - Namespace(naming.ProjectNamespace(project.Name)). - VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). - Param("extended", "true"). - Do(ctx). - Into(spaceInstanceList) - if err != nil { - return nil, err - } - - var spaces []*SpaceInstanceProject - for _, spaceInstance := range spaceInstanceList.Items { - if !spaceInstance.Status.CanUse { - continue - } - - s := spaceInstance - spaces = append(spaces, &SpaceInstanceProject{ - SpaceInstance: &s, - Project: project, - }) - } - return spaces, nil -} - -func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, virtualClusterName string) (*VirtualClusterInstanceProject, error) { - virtualClusterInstance := &managementv1.VirtualClusterInstance{} - err := managementClient.Loft().ManagementV1().RESTClient(). - Get(). - Resource("virtualclusterinstances"). - Namespace(naming.ProjectNamespace(project.Name)). - Name(virtualClusterName). - VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). - Param("extended", "true"). - Do(ctx). - Into(virtualClusterInstance) - if err != nil { - return nil, err - } - - if !virtualClusterInstance.Status.CanUse { - return nil, fmt.Errorf("no use access") - } - - return &VirtualClusterInstanceProject{ - VirtualCluster: virtualClusterInstance, - Project: project, - }, nil -} - -func getProjectVirtualClusterInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*VirtualClusterInstanceProject, error) { - virtualClusterInstanceList := &managementv1.VirtualClusterInstanceList{} - err := managementClient.Loft().ManagementV1().RESTClient(). - Get(). - Resource("virtualclusterinstances"). - Namespace(naming.ProjectNamespace(project.Name)). - VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). - Param("extended", "true"). - Do(ctx). 
- Into(virtualClusterInstanceList) - if err != nil { - return nil, err - } - - var virtualClusters []*VirtualClusterInstanceProject - for _, virtualClusterInstance := range virtualClusterInstanceList.Items { - if !virtualClusterInstance.Status.CanUse { - continue - } - - v := virtualClusterInstance - virtualClusters = append(virtualClusters, &VirtualClusterInstanceProject{ - VirtualCluster: &v, - Project: project, - }) - } - return virtualClusters, nil -} diff --git a/pkg/platform/loftclient/naming/naming.go b/pkg/platform/loftclient/naming/naming.go deleted file mode 100644 index e2952bfe2..000000000 --- a/pkg/platform/loftclient/naming/naming.go +++ /dev/null @@ -1,24 +0,0 @@ -package naming - -import ( - "crypto/sha256" - "encoding/hex" - "strings" -) - -func ProjectNamespace(projectName string) string { - return "loft-p-" + projectName -} - -func SafeConcatName(name ...string) string { - return SafeConcatNameMax(name, 63) -} - -func SafeConcatNameMax(name []string, max int) string { - fullPath := strings.Join(name, "-") - if len(fullPath) > max { - digest := sha256.Sum256([]byte(fullPath)) - return fullPath[0:max-8] + "-" + hex.EncodeToString(digest[0:])[0:7] - } - return fullPath -} diff --git a/pkg/platform/loftconfig/variables.go b/pkg/platform/loftconfig/variables.go deleted file mode 100644 index de91b7bb7..000000000 --- a/pkg/platform/loftconfig/variables.go +++ /dev/null @@ -1,21 +0,0 @@ -package config - -import ( - "os" - "time" -) - -const ( - defaultTimeout = 10 * time.Minute - timeoutEnvVariable = "LOFT_TIMEOUT" -) - -func Timeout() time.Duration { - if timeout := os.Getenv(timeoutEnvVariable); timeout != "" { - if parsedTimeout, err := time.ParseDuration(timeout); err == nil { - return parsedTimeout - } - } - - return defaultTimeout -} diff --git a/pkg/platform/loftutils/positional_args.go b/pkg/platform/loftutils/positional_args.go deleted file mode 100644 index 08f9f23fa..000000000 --- a/pkg/platform/loftutils/positional_args.go +++ /dev/null @@ -1,69 +0,0 @@ -package util - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -var ( - SpaceNameOnlyUseLine string - SpaceNameOnlyValidator cobra.PositionalArgs - - VClusterNameOnlyUseLine string - - VClusterNameOnlyValidator cobra.PositionalArgs -) - -func init() { - SpaceNameOnlyUseLine, SpaceNameOnlyValidator = NamedPositionalArgsValidator(true, true, "SPACE_NAME") - VClusterNameOnlyUseLine, VClusterNameOnlyValidator = NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME") -} - -// NamedPositionalArgsValidator returns a cobra.PositionalArgs that returns a helpful -// error message if the arg number doesn't match. -// It also returns a string that can be appended to the cobra useline -// -// Example output for extra arguments: -// -// $ command arg asdf -// [fatal] command ARG_1 [flags] -// Invalid Args: received 2 arguments, expected 1, extra arguments: "asdf" -// Run with --help for more details -// -// Example output for missing arguments: -// -// $ command -// [fatal] command ARG_1 [flags] -// Invalid Args: received 0 arguments, expected 1, please specify missing: "ARG_1" 
-// Run with --help for more details on arguments -func NamedPositionalArgsValidator(failMissing, failExtra bool, expectedArgs ...string) (string, cobra.PositionalArgs) { - return " " + strings.Join(expectedArgs, " "), func(cmd *cobra.Command, args []string) error { - numExpectedArgs := len(expectedArgs) - numArgs := len(args) - numMissing := numExpectedArgs - numArgs - - if numMissing == 0 { - return nil - } - - // didn't receive as many arguments as expected - if numMissing > 0 && failMissing { - // the last numMissing expectedArgs - missingKeys := strings.Join(expectedArgs[len(expectedArgs)-(numMissing):], ", ") - return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, please specify missing: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, missingKeys) - } - - // received more than expected - if numMissing < 0 && failExtra { - // received more than expected - numExtra := -numMissing - // the last numExtra args - extraValues := strings.Join(args[len(args)-numExtra:], ", ") - return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, extra arguments: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, extraValues) - } - - return nil - } -} diff --git a/pkg/platform/loftutils/positional_args_test.go b/pkg/platform/loftutils/positional_args_test.go deleted file mode 100644 index ac45cb4d1..000000000 --- a/pkg/platform/loftutils/positional_args_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package util - -import ( - "fmt" - "testing" - - "github.com/spf13/cobra" - "gotest.tools/v3/assert" -) - -func TestNamedPositionalArgsValidator(t *testing.T) { - // loop through a generated variety of inputs: arg counts, expected arg counts, and failMissing - // since it depends on the numbers, it's easier to loop than writing a test table - maxExpectedArgCount := 5 - maxActualArgsCount := maxExpectedArgCount + 5 - expectedArgs := []string{} - testNum := 0 - // loop through maxExpectedArgCount lengths of expectedArgs - for len(expectedArgs) <= maxExpectedArgCount { - actualArgs := []string{} - // loop through maxActualArgsCount lengths of actualArgs - for len(actualArgs) <= maxActualArgsCount { - defer func() { - panicErr := recover() - if panicErr != nil { - t.Fatalf("this function should never panic: %+v", panicErr) - } - }() - testNum++ - // loop through both values of failMissing and failExtra - for _, failMissing := range []bool{true, false} { - for _, failExtra := range []bool{true, false} { - // execute test - t.Logf("running test #%d with failMissing %v, failExtra %v, expectedArgs: %q, args: %q", testNum, failMissing, failExtra, expectedArgs, actualArgs) - // if testNum == 23 { - // t.Log("focus a test number for debugging") - // } - _, validator := NamedPositionalArgsValidator(failMissing, failExtra, expectedArgs...)
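For context, this is how the pair returned by NamedPositionalArgsValidator is typically wired into a cobra command, mirroring the init() block of positional_args.go above; a sketch only, and the "connect" command name is made up:

    useLine, validator := NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME")
    cmd := &cobra.Command{
        Use:  "connect" + useLine, // renders as "connect VCLUSTER_NAME"
        Args: validator,           // rejects missing or extra positional args
        RunE: func(_ *cobra.Command, args []string) error {
            return nil // args[0] is guaranteed to exist here
        },
    }
    _ = cmd
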
- err := validator(&cobra.Command{}, actualArgs) - if len(actualArgs) > len(expectedArgs) && failExtra { - assert.ErrorContains(t, err, "extra arguments:", "expect error to not be nil as arg count is mismatched") - } else if len(actualArgs) < len(expectedArgs) && failMissing { - assert.ErrorContains(t, err, "please specify missing:", "expect error to not be nil as arg count is mismatched") - } else { - assert.NilError(t, err, "expect error to be nil as all args provided and no extra") - } - // append to actual args - actualArgs = append(actualArgs, fmt.Sprintf("ARG_%d", len(actualArgs))) - } - } - } - // append to expected args - expectedArgs = append(expectedArgs, fmt.Sprintf("ARG_NAME_%d", len(expectedArgs))) - } -} diff --git a/pkg/platform/loftutils/util.go b/pkg/platform/loftutils/util.go deleted file mode 100644 index bda639922..000000000 --- a/pkg/platform/loftutils/util.go +++ /dev/null @@ -1,26 +0,0 @@ -package util - -import ( - "errors" - - kerrors "k8s.io/apimachinery/pkg/api/errors" -) - -func GetCause(err error) string { - if err == nil { - return "" - } - - var statusErr *kerrors.StatusError - - if errors.As(err, &statusErr) { - details := statusErr.Status().Details - if details != nil && len(details.Causes) > 0 { - return details.Causes[0].Message - } - - return statusErr.Error() - } - - return err.Error() -} From 6bc176de6f5449bbf16ec02b63efe1683c99db2f Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 14:00:08 +0200 Subject: [PATCH 03/17] rebased on jo's branch --- cmd/vclusterctl/cmd/platform/add/cluster.go | 20 +++--- .../cmd/platform/connect/cluster.go | 61 +++------------- .../cmd/platform/connect/connect.go | 3 +- cmd/vclusterctl/cmd/platform/get/cluster.go | 17 +++-- cmd/vclusterctl/cmd/platform/get/get.go | 5 +- cmd/vclusterctl/cmd/platform/import.go | 2 +- cmd/vclusterctl/cmd/platform/platform.go | 5 +- cmd/vclusterctl/cmd/platform/pro.go | 2 - cmd/vclusterctl/cmd/root.go | 3 +- pkg/platform/client.go | 1 + pkg/platform/loftutils/positional_args.go | 69 +++++++++++++++++++ .../loftutils/positional_args_test.go | 55 +++++++++++++++ pkg/platform/loftutils/util.go | 26 +++++++ 13 files changed, 191 insertions(+), 78 deletions(-) create mode 100644 pkg/platform/loftutils/positional_args.go create mode 100644 pkg/platform/loftutils/positional_args_test.go create mode 100644 pkg/platform/loftutils/util.go diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go index 8814f302f..2aaa27156 100644 --- a/cmd/vclusterctl/cmd/platform/add/cluster.go +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -9,18 +9,19 @@ import ( "os/exec" "time" + "github.com/loft-sh/loftctl/v4/pkg/client/helper" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/loftctl/v4/pkg/kube" "github.com/loft-sh/log" - "github.com/loft-sh/vcluster/pkg/platform/clihelper" - "github.com/loft-sh/vcluster/pkg/platform/kube" - client "github.com/loft-sh/vcluster/pkg/platform/loftclient" - "github.com/loft-sh/vcluster/pkg/platform/loftclient/helper" "github.com/sirupsen/logrus" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/wait" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 
+32,7 @@ import ( type ClusterCmd struct { Log log.Logger *flags.GlobalFlags + Cfg *config.CLI Namespace string ServiceAccount string DisplayName string @@ -89,17 +91,17 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { // Get clusterName from command argument clusterName := args[0] - baseClient, err := client.NewClientFromPath(cmd.Config) + platformClient, err := platform.NewClientFromConfig(ctx, cmd.Cfg) if err != nil { return fmt.Errorf("new client from path: %w", err) } - err = client.VerifyVersion(baseClient) + err = platform.VerifyVersion(platformClient) if err != nil { return fmt.Errorf("verify loft version: %w", err) } - managementClient, err := baseClient.Management() + managementClient, err := platformClient.Management() if err != nil { return fmt.Errorf("create management client: %w", err) } @@ -110,7 +112,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { return fmt.Errorf("get user or team: %w", err) } - loftVersion, err := baseClient.Version() + loftVersion, err := platformClient.Version() if err != nil { return fmt.Errorf("get loft version: %w", err) } @@ -189,7 +191,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { helmArgs = append(helmArgs, "--set", "token="+accessKey.AccessKey) } - if cmd.Insecure || accessKey.Insecure || baseClient.Config().Insecure { + if cmd.Insecure || accessKey.Insecure { helmArgs = append(helmArgs, "--set", "insecureSkipVerify=true") } diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 49e6d08b8..869709074 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -5,15 +5,13 @@ import ( "encoding/base64" "fmt" "os" - "strings" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" - client "github.com/loft-sh/vcluster/pkg/platform/loftclient" - "github.com/loft-sh/vcluster/pkg/platform/loftclient/helper" + "github.com/loft-sh/vcluster/pkg/platform" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/mgutz/ansi" "github.com/spf13/cobra" @@ -80,7 +78,7 @@ vcluster platform connect cluster mycluster // Run executes the command func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { - baseClient, err := client.NewClientFromPath(cmd.Config) + platformClient, err := platform.NewClientFromConfig(ctx, cmd.LoadedConfig(cmd.log)) if err != nil { return err } @@ -93,7 +91,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { // determine cluster name clusterName := "" if len(args) == 0 { - clusterName, err = helper.SelectCluster(ctx, baseClient, cmd.log) + clusterName, err = platformClient.SelectCluster(ctx, cmd.log) if err != nil { return err } @@ -112,7 +110,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { } // create kube context options - contextOptions, err := CreateClusterContextOptions(baseClient, cmd.Config, cluster, "", cmd.DisableDirectClusterEndpoint, true, cmd.log) + contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", cmd.DisableDirectClusterEndpoint, true, cmd.log) if err != nil { return err } @@ -136,43 +134,15 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { return nil } -//func 
findProjectCluster(ctx context.Context, baseClient client.Client, projectName, clusterName string) (*managementv1.Cluster, error) { -// managementClient, err := baseClient.Management() -// if err != nil { -// return nil, err -// } -// -// projectClusters, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, projectName, metav1.GetOptions{}) -// if err != nil { -// return nil, errors.Wrap(err, "list project clusters") -// } -// -// for _, cluster := range projectClusters.Clusters { -// if cluster.Name == clusterName { -// return &cluster, nil -// } -// } -// -// return nil, fmt.Errorf("couldn't find cluster %s in project %s", clusterName, projectName) -//} - -func CreateClusterContextOptions(baseClient client.Client, config string, cluster *managementv1.Cluster, spaceName string, disableClusterGateway, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { +func CreateClusterContextOptions(platformClient platform.Client, config string, cluster *managementv1.Cluster, spaceName string, disableClusterGateway, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { contextOptions := kubeconfig.ContextOptions{ Name: kubeconfig.SpaceContextName(cluster.Name, spaceName), ConfigPath: config, CurrentNamespace: spaceName, SetActive: setActive, } - if !disableClusterGateway && cluster.Annotations != nil && cluster.Annotations[LoftDirectClusterEndpoint] != "" { - contextOptions = ApplyDirectClusterEndpointOptions(contextOptions, cluster, "/kubernetes/cluster", log) - _, err := baseClient.DirectClusterEndpointToken(true) - if err != nil { - return kubeconfig.ContextOptions{}, fmt.Errorf("retrieving direct cluster endpoint token: %w. Use --disable-direct-cluster-endpoint to create a context without using direct cluster endpoints", err) - } - } else { - contextOptions.Server = baseClient.Config().Host + "/kubernetes/cluster/" + cluster.Name - contextOptions.InsecureSkipTLSVerify = baseClient.Config().Insecure - } + contextOptions.Server = platformClient.Config().Platform.Host + "/kubernetes/cluster/" + cluster.Name + contextOptions.InsecureSkipTLSVerify = platformClient.Config().Platform.Insecure data, err := retrieveCaData(cluster) if err != nil { @@ -182,21 +152,6 @@ func CreateClusterContextOptions(baseClient client.Client, config string, cluste return contextOptions, nil } -func ApplyDirectClusterEndpointOptions(options kubeconfig.ContextOptions, cluster *managementv1.Cluster, path string, log log.Logger) kubeconfig.ContextOptions { - server := strings.TrimSuffix(cluster.Annotations[LoftDirectClusterEndpoint], "/") - if !strings.HasPrefix(server, "https://") { - server = "https://" + server - } - - log.Infof("Using direct cluster endpoint at %s", server) - options.Server = server + path - if cluster.Annotations[LoftDirectClusterEndpointInsecure] == "true" { - options.InsecureSkipTLSVerify = true - } - options.DirectClusterEndpointEnabled = true - return options -} - func retrieveCaData(cluster *managementv1.Cluster) ([]byte, error) { if cluster == nil || cluster.Annotations == nil || cluster.Annotations[LoftDirectClusterEndpointCaData] == "" { return nil, nil diff --git a/cmd/vclusterctl/cmd/platform/connect/connect.go b/cmd/vclusterctl/cmd/platform/connect/connect.go index 1b9eafd45..5f775648f 100644 --- a/cmd/vclusterctl/cmd/platform/connect/connect.go +++ b/cmd/vclusterctl/cmd/platform/connect/connect.go @@ -3,12 +3,11 @@ package connect import ( "github.com/loft-sh/api/v4/pkg/product" "github.com/loft-sh/vcluster/pkg/cli/flags" - platformdefaults 
"github.com/loft-sh/vcluster/pkg/platform/defaults" "github.com/spf13/cobra" ) // NewConnectCmd creates a new cobra command -func NewConnectCmd(globalFlags *flags.GlobalFlags, _ *platformdefaults.Defaults) *cobra.Command { +func NewConnectCmd(globalFlags *flags.GlobalFlags) *cobra.Command { description := product.ReplaceWithHeader("use", ` Activates a kube context for the given cluster / space / vcluster / management. diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go index 6b6869029..f3fdd8124 100644 --- a/cmd/vclusterctl/cmd/platform/get/cluster.go +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -8,10 +8,11 @@ import ( "time" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + "github.com/loft-sh/loftctl/v4/pkg/config" + cliconfig "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" - client "github.com/loft-sh/vcluster/pkg/platform/loftclient" - "github.com/loft-sh/vcluster/pkg/platform/loftclient/naming" - config "github.com/loft-sh/vcluster/pkg/platform/loftconfig" + "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/projectutil" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" @@ -25,11 +26,13 @@ var ( type clusterCmd struct { *flags.GlobalFlags + cfg *cliconfig.CLI } -func newClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { +func newClusterCmd(globalFlags *flags.GlobalFlags, cfg *cliconfig.CLI) *cobra.Command { cmd := &clusterCmd{ GlobalFlags: globalFlags, + cfg: cfg, } return &cobra.Command{ @@ -61,7 +64,7 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { isProject, projectName := isProjectContext(cluster) if isProject { - baseClient, err := client.NewClientFromPath(c.Config) + baseClient, err := platform.NewClientFromConfig(ctx, c.cfg) if err != nil { return err } @@ -76,7 +79,7 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) { var err error - spaceInstance, err = managementClient.Loft().ManagementV1().SpaceInstances(naming.ProjectNamespace(projectName)).Get(ctx, spaceName, metav1.GetOptions{}) + spaceInstance, err = managementClient.Loft().ManagementV1().SpaceInstances(projectutil.ProjectNamespace(projectName)).Get(ctx, spaceName, metav1.GetOptions{}) if err != nil { return false, err } @@ -101,7 +104,7 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) { var err error - virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(naming.ProjectNamespace(projectName)).Get(ctx, virtualClusterName, metav1.GetOptions{}) + virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(projectutil.ProjectNamespace(projectName)).Get(ctx, virtualClusterName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/cmd/vclusterctl/cmd/platform/get/get.go b/cmd/vclusterctl/cmd/platform/get/get.go index 2ec7b1580..decb21fde 100644 --- a/cmd/vclusterctl/cmd/platform/get/get.go +++ b/cmd/vclusterctl/cmd/platform/get/get.go @@ -2,12 +2,13 @@ package get import ( "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" ) // NewVarsCmd 
creates a new cobra command for the sub command -func NewVarsCmd(globalFlags *flags.GlobalFlags) *cobra.Command { +func NewVarsCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command { description := product.ReplaceWithHeader("var", "") cmd := &cobra.Command{ @@ -17,6 +18,6 @@ func NewVarsCmd(globalFlags *flags.GlobalFlags) *cobra.Command { Args: cobra.NoArgs, } - cmd.AddCommand(newClusterCmd(globalFlags)) + cmd.AddCommand(newClusterCmd(globalFlags, cfg)) return cmd } diff --git a/cmd/vclusterctl/cmd/platform/import.go b/cmd/vclusterctl/cmd/platform/import.go index ba44f0fe4..a00ba494f 100644 --- a/cmd/vclusterctl/cmd/platform/import.go +++ b/cmd/vclusterctl/cmd/platform/import.go @@ -7,7 +7,7 @@ import ( "github.com/loft-sh/vcluster/pkg/cli" "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/loft-sh/vcluster/pkg/platform" + loftctlUtil "github.com/loft-sh/vcluster/pkg/platform/loftutils" "github.com/spf13/cobra" ) diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go index a3073f4d3..65edb97b3 100644 --- a/cmd/vclusterctl/cmd/platform/platform.go +++ b/cmd/vclusterctl/cmd/platform/platform.go @@ -4,11 +4,13 @@ import ( "fmt" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/add" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/get" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" ) -func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { +func NewPlatformCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) (*cobra.Command, error) { platformCmd := &cobra.Command{ Use: "platform", Short: "vCluster platform subcommands", @@ -29,6 +31,7 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { platformCmd.AddCommand(add.NewAddCmd(globalFlags)) platformCmd.AddCommand(NewAccessKeyCmd(globalFlags)) platformCmd.AddCommand(NewImportCmd(globalFlags)) + platformCmd.AddCommand(get.NewVarsCmd(globalFlags, cfg)) return platformCmd, nil } diff --git a/cmd/vclusterctl/cmd/platform/pro.go b/cmd/vclusterctl/cmd/platform/pro.go index 342cbd995..241a36d1f 100644 --- a/cmd/vclusterctl/cmd/platform/pro.go +++ b/cmd/vclusterctl/cmd/platform/pro.go @@ -1,8 +1,6 @@ package platform import ( - "fmt" - "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/pkg/cli/flags" diff --git a/cmd/vclusterctl/cmd/root.go b/cmd/vclusterctl/cmd/root.go index dd6196354..b12e5c370 100644 --- a/cmd/vclusterctl/cmd/root.go +++ b/cmd/vclusterctl/cmd/root.go @@ -86,6 +86,7 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { rootCmd := NewRootCmd(log) persistentFlags := rootCmd.PersistentFlags() globalFlags = flags.SetGlobalFlags(persistentFlags, log) + cfg := globalFlags.LoadedConfig(log) // Set version for --version flag rootCmd.Version = upgrade.GetVersion() @@ -112,7 +113,7 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { return nil, fmt.Errorf("failed to create pro command: %w", err) } rootCmd.AddCommand(proCmd) - platformCmd, err := cmdpro.NewPlatformCmd(globalFlags) + platformCmd, err := cmdpro.NewPlatformCmd(globalFlags, cfg) if err != nil { return nil, fmt.Errorf("failed to create platform command: %w", err) } diff --git a/pkg/platform/client.go b/pkg/platform/client.go index b9462133e..cc8a4747d 100644 --- a/pkg/platform/client.go +++ b/pkg/platform/client.go @@ -84,6 +84,7 @@ type Client interface { 
 	ResolveTemplate(ctx context.Context, project, template, templateVersion string, setParams []string, fileParams string, log log.Logger) (*managementv1.VirtualClusterTemplate, string, error)
 	SelectProjectOrCluster(ctx context.Context, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error)
+	SelectCluster(ctx context.Context, log log.Logger) (string, error)
 
 	ApplyPlatformSecret(ctx context.Context, kubeClient kubernetes.Interface, importName, namespace, project string) error
diff --git a/pkg/platform/loftutils/positional_args.go b/pkg/platform/loftutils/positional_args.go
new file mode 100644
index 000000000..08f9f23fa
--- /dev/null
+++ b/pkg/platform/loftutils/positional_args.go
@@ -0,0 +1,69 @@
+package util
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	SpaceNameOnlyUseLine   string
+	SpaceNameOnlyValidator cobra.PositionalArgs
+
+	VClusterNameOnlyUseLine string
+
+	VClusterNameOnlyValidator cobra.PositionalArgs
+)
+
+func init() {
+	SpaceNameOnlyUseLine, SpaceNameOnlyValidator = NamedPositionalArgsValidator(true, true, "SPACE_NAME")
+	VClusterNameOnlyUseLine, VClusterNameOnlyValidator = NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME")
+}
+
+// NamedPositionalArgsValidator returns a cobra.PositionalArgs that returns a helpful
+// error message if the arg number doesn't match.
+// It also returns a string that can be appended to the cobra use line.
+//
+// Example output for extra arguments:
+//
+//	$ command arg asdf
+//	[fatal] command ARG_1 [flags]
+//	Invalid Args: received 2 arguments, expected 1, extra arguments: "asdf"
+//	Run with --help for more details on arguments
+//
+// Example output for missing arguments:
+//
+//	$ command
+//	[fatal] command ARG_1 [flags]
+//	Invalid Args: received 0 arguments, expected 1, please specify missing: "ARG_1"
+//	Run with --help for more details on arguments
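+//
+// A minimal wiring sketch (illustrative only; "add-cluster" is a hypothetical
+// command name, not something this package defines). The returned use line and
+// validator are meant to be used together so help text and validation stay in
+// sync:
+//
+//	useLine, validator := NamedPositionalArgsValidator(true, true, "CLUSTER_NAME")
+//	cmd := &cobra.Command{
+//		Use:  "add-cluster" + useLine,
+//		Args: validator,
+//	}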
+func NamedPositionalArgsValidator(failMissing, failExtra bool, expectedArgs ...string) (string, cobra.PositionalArgs) {
+	return " " + strings.Join(expectedArgs, " "), func(cmd *cobra.Command, args []string) error {
+		numExpectedArgs := len(expectedArgs)
+		numArgs := len(args)
+		numMissing := numExpectedArgs - numArgs
+
+		if numMissing == 0 {
+			return nil
+		}
+
+		// didn't receive as many arguments as expected
+		if numMissing > 0 && failMissing {
+			// the last numMissing expectedArgs
+			missingKeys := strings.Join(expectedArgs[len(expectedArgs)-(numMissing):], ", ")
+			return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, please specify missing: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, missingKeys)
+		}
+
+		// received more than expected
+		if numMissing < 0 && failExtra {
+			numExtra := -numMissing
+			// the last numExtra args
+			extraValues := strings.Join(args[len(args)-numExtra:], ", ")
+			return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, extra arguments: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, extraValues)
+		}
+
+		return nil
+	}
+}
diff --git a/pkg/platform/loftutils/positional_args_test.go b/pkg/platform/loftutils/positional_args_test.go
new file mode 100644
index 000000000..ac45cb4d1
--- /dev/null
+++ b/pkg/platform/loftutils/positional_args_test.go
@@ -0,0 +1,55 @@
+package util
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/spf13/cobra"
+	"gotest.tools/v3/assert"
+)
+
+func TestNamedPositionalArgsValidator(t *testing.T) {
+	// loop through a generated variety of inputs: arg counts, expected arg counts, failMissing, and failExtra
+	// since the cases depend on the numbers, it's easier to loop than to write a test table
+	maxExpectedArgCount := 5
+	maxActualArgsCount := maxExpectedArgCount + 5
+	expectedArgs := []string{}
+	testNum := 0
+	// loop through maxExpectedArgCount lengths of expectedArgs
+	for len(expectedArgs) <= maxExpectedArgCount {
+		actualArgs := []string{}
+		// loop through maxActualArgCount lengths of actualArgs
+		for len(actualArgs) <= maxActualArgsCount {
+			defer func() {
+				panicErr := recover()
+				if panicErr != nil {
+					t.Fatalf("this function should never panic: %+v", panicErr)
+				}
+			}()
+			testNum++
+			// loop through both values of failMissing and failExtra
+			for _, failMissing := range []bool{true, false} {
+				for _, failExtra := range []bool{true, false} {
+					// execute test
+					t.Logf("running test #%d with failMissing %v, failExtra %v, expectedArgs: %q, args: %q", testNum, failMissing, failExtra, expectedArgs, actualArgs)
+					// if testNum == 23 {
+					// 	t.Log("focus a test number for debugging")
+					// }
+					_, validator := NamedPositionalArgsValidator(failMissing, failExtra, expectedArgs...)
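+					// a zero-value cobra.Command is enough here: the validator only
+					// reads cmd.UseLine() when it formats an error message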
+ err := validator(&cobra.Command{}, actualArgs) + if len(actualArgs) > len(expectedArgs) && failExtra { + assert.ErrorContains(t, err, "extra arguments:", "expect error to not be nil as arg count is mismatched") + } else if len(actualArgs) < len(expectedArgs) && failMissing { + assert.ErrorContains(t, err, "please specify missing:", "expect error to not be nil as arg count is mismatched") + } else { + assert.NilError(t, err, "expect error to be nil as all args provided and no extra") + } + // append to actual args + actualArgs = append(actualArgs, fmt.Sprintf("ARG_%d", len(actualArgs))) + } + } + } + // append to expected args + expectedArgs = append(expectedArgs, fmt.Sprintf("ARG_NAME_%d", len(expectedArgs))) + } +} diff --git a/pkg/platform/loftutils/util.go b/pkg/platform/loftutils/util.go new file mode 100644 index 000000000..bda639922 --- /dev/null +++ b/pkg/platform/loftutils/util.go @@ -0,0 +1,26 @@ +package util + +import ( + "errors" + + kerrors "k8s.io/apimachinery/pkg/api/errors" +) + +func GetCause(err error) string { + if err == nil { + return "" + } + + var statusErr *kerrors.StatusError + + if errors.As(err, &statusErr) { + details := statusErr.Status().Details + if details != nil && len(details.Causes) > 0 { + return details.Causes[0].Message + } + + return statusErr.Error() + } + + return err.Error() +} From 28913552798e233b5ad3a7e6b45f8dcaafbc6f7b Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 14:29:39 +0200 Subject: [PATCH 04/17] fixed import --- cmd/vclusterctl/cmd/platform/list/clusters.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/vclusterctl/cmd/platform/list/clusters.go b/cmd/vclusterctl/cmd/platform/list/clusters.go index bac9cce2d..163916ae5 100644 --- a/cmd/vclusterctl/cmd/platform/list/clusters.go +++ b/cmd/vclusterctl/cmd/platform/list/clusters.go @@ -5,10 +5,10 @@ import ( "time" "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/client" "github.com/loft-sh/log" "github.com/loft-sh/log/table" "github.com/loft-sh/vcluster/pkg/cli/flags" - client "github.com/loft-sh/vcluster/pkg/platform/loftclient" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/duration" From 6235f4e750767dfecbe36ae5374684e4f5fdbdab Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 14:40:08 +0200 Subject: [PATCH 05/17] linting --- pkg/cli/start/start.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/cli/start/start.go b/pkg/cli/start/start.go index 0f25100f4..4de26f3df 100644 --- a/pkg/cli/start/start.go +++ b/pkg/cli/start/start.go @@ -152,7 +152,6 @@ func (l *LoftStarter) prepare(ctx context.Context) error { contextToLoad := kubeConfig.CurrentContext if l.Context != "" { contextToLoad = l.Context - } else if platformConfig.LastInstallContext != "" && platformConfig.LastInstallContext != contextToLoad { contextToLoad, err = l.Log.Question(&survey.QuestionOptions{ Question: product.Replace("Seems like you try to use 'loft start' with a different kubernetes context than before. 
Please choose which kubernetes context you want to use"), @@ -165,7 +164,6 @@ func (l *LoftStarter) prepare(ctx context.Context) error { } l.Context = contextToLoad - platformConfig.LastInstallContext = contextToLoad if err := platformClient.Save(); err != nil { return fmt.Errorf("save vCluster config: %w", err) From 931bf83f6a76b7631bd80982a0741339ab0155b7 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 14:50:52 +0200 Subject: [PATCH 06/17] removed disable cluster endpoint --- cmd/vclusterctl/cmd/platform/connect/cluster.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 869709074..401274ea3 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -72,7 +72,6 @@ vcluster platform connect cluster mycluster } c.Flags().BoolVar(&cmd.Print, "print", false, "When enabled prints the context to stdout") - c.Flags().BoolVar(&cmd.DisableDirectClusterEndpoint, "disable-direct-cluster-endpoint", false, "When enabled does not use an available direct cluster endpoint to connect to the cluster") return c } @@ -110,7 +109,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { } // create kube context options - contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", cmd.DisableDirectClusterEndpoint, true, cmd.log) + contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", true, cmd.log) if err != nil { return err } @@ -134,7 +133,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { return nil } -func CreateClusterContextOptions(platformClient platform.Client, config string, cluster *managementv1.Cluster, spaceName string, disableClusterGateway, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { +func CreateClusterContextOptions(platformClient platform.Client, config string, cluster *managementv1.Cluster, spaceName string, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { contextOptions := kubeconfig.ContextOptions{ Name: kubeconfig.SpaceContextName(cluster.Name, spaceName), ConfigPath: config, From 369003fc0106bd8bd15cb4c7137fe135b9bc17c7 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 14:58:56 +0200 Subject: [PATCH 07/17] linting --- cmd/vclusterctl/cmd/platform/connect/cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 401274ea3..1001e5986 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -109,7 +109,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { } // create kube context options - contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", true, cmd.log) + contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", true) if err != nil { return err } @@ -133,7 +133,7 @@ func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { return nil } -func CreateClusterContextOptions(platformClient platform.Client, config string, cluster *managementv1.Cluster, spaceName string, setActive bool, log log.Logger) (kubeconfig.ContextOptions, error) { +func CreateClusterContextOptions(platformClient platform.Client, config string, cluster 
*managementv1.Cluster, spaceName string, setActive bool) (kubeconfig.ContextOptions, error) { contextOptions := kubeconfig.ContextOptions{ Name: kubeconfig.SpaceContextName(cluster.Name, spaceName), ConfigPath: config, From a448128117966b808122bb2d226437b82b356ea4 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 15:19:25 +0200 Subject: [PATCH 08/17] added missing commands and renamed some --- cmd/vclusterctl/cmd/platform/platform.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go index 65edb97b3..f942b555a 100644 --- a/cmd/vclusterctl/cmd/platform/platform.go +++ b/cmd/vclusterctl/cmd/platform/platform.go @@ -4,7 +4,9 @@ import ( "fmt" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/add" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/get" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/list" "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" @@ -31,7 +33,9 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) (*cobra.Com platformCmd.AddCommand(add.NewAddCmd(globalFlags)) platformCmd.AddCommand(NewAccessKeyCmd(globalFlags)) platformCmd.AddCommand(NewImportCmd(globalFlags)) - platformCmd.AddCommand(get.NewVarsCmd(globalFlags, cfg)) + platformCmd.AddCommand(get.NewGetCmd(globalFlags, cfg)) + platformCmd.AddCommand(connect.NewConnectCmd(globalFlags)) + platformCmd.AddCommand(list.NewListCmd(globalFlags)) return platformCmd, nil } From 75552c138e7318645fa4d85e237e5dc2b927d350 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 15:46:06 +0200 Subject: [PATCH 09/17] added missing dependencies --- cmd/vclusterctl/cmd/platform/add/cluster.go | 6 +- .../cmd/platform/connect/cluster.go | 2 +- .../cmd/platform/connect/connect.go | 6 +- cmd/vclusterctl/cmd/platform/get/cluster.go | 4 +- cmd/vclusterctl/cmd/platform/get/get.go | 4 +- cmd/vclusterctl/cmd/platform/list/clusters.go | 11 +- cmd/vclusterctl/cmd/platform/list/list.go | 3 +- cmd/vclusterctl/cmd/platform/platform.go | 2 +- cmd/vclusterctl/cmd/platform/reset.go | 2 +- pkg/platform/helper/helper.go | 1160 +++++++++++++++++ pkg/platform/kube/client.go | 54 + pkg/platform/kubeconfig/kubeconfig.go | 266 ++++ pkg/platform/platformclihelper/clihelper.go | 774 +++++++++++ 13 files changed, 2276 insertions(+), 18 deletions(-) create mode 100644 pkg/platform/helper/helper.go create mode 100644 pkg/platform/kube/client.go create mode 100644 pkg/platform/kubeconfig/kubeconfig.go create mode 100644 pkg/platform/platformclihelper/clihelper.go diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go index 2aaa27156..94d701cf8 100644 --- a/cmd/vclusterctl/cmd/platform/add/cluster.go +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -9,9 +9,6 @@ import ( "os/exec" "time" - "github.com/loft-sh/loftctl/v4/pkg/client/helper" - "github.com/loft-sh/loftctl/v4/pkg/clihelper" - "github.com/loft-sh/loftctl/v4/pkg/kube" "github.com/loft-sh/log" "github.com/sirupsen/logrus" kerrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,6 +19,9 @@ import ( "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/helper" + "github.com/loft-sh/vcluster/pkg/platform/kube" + clihelper 
"github.com/loft-sh/vcluster/pkg/platform/platformclihelper" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 1001e5986..81ad8f5c6 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -8,10 +8,10 @@ import ( managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" "github.com/loft-sh/log" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/mgutz/ansi" "github.com/spf13/cobra" diff --git a/cmd/vclusterctl/cmd/platform/connect/connect.go b/cmd/vclusterctl/cmd/platform/connect/connect.go index 5f775648f..b201f8dcf 100644 --- a/cmd/vclusterctl/cmd/platform/connect/connect.go +++ b/cmd/vclusterctl/cmd/platform/connect/connect.go @@ -12,13 +12,13 @@ func NewConnectCmd(globalFlags *flags.GlobalFlags) *cobra.Command { Activates a kube context for the given cluster / space / vcluster / management. `) - useCmd := &cobra.Command{ + connectCmd := &cobra.Command{ Use: "connect", Short: product.Replace("Uses loft resources"), Long: description, Args: cobra.NoArgs, } - useCmd.AddCommand(NewClusterCmd(globalFlags)) - return useCmd + connectCmd.AddCommand(NewClusterCmd(globalFlags)) + return connectCmd } diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go index f3fdd8124..2f796c6de 100644 --- a/cmd/vclusterctl/cmd/platform/get/cluster.go +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -64,12 +64,12 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { isProject, projectName := isProjectContext(cluster) if isProject { - baseClient, err := platform.NewClientFromConfig(ctx, c.cfg) + platformClient, err := platform.NewClientFromConfig(ctx, c.cfg) if err != nil { return err } - managementClient, err := baseClient.Management() + managementClient, err := platformClient.Management() if err != nil { return err } diff --git a/cmd/vclusterctl/cmd/platform/get/get.go b/cmd/vclusterctl/cmd/platform/get/get.go index decb21fde..8147cd995 100644 --- a/cmd/vclusterctl/cmd/platform/get/get.go +++ b/cmd/vclusterctl/cmd/platform/get/get.go @@ -7,8 +7,8 @@ import ( "github.com/spf13/cobra" ) -// NewVarsCmd creates a new cobra command for the sub command -func NewVarsCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command { +// NewGetCmd creates a new cobra command for the sub command +func NewGetCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command { description := product.ReplaceWithHeader("var", "") cmd := &cobra.Command{ diff --git a/cmd/vclusterctl/cmd/platform/list/clusters.go b/cmd/vclusterctl/cmd/platform/list/clusters.go index 163916ae5..5b6d0c2bc 100644 --- a/cmd/vclusterctl/cmd/platform/list/clusters.go +++ b/cmd/vclusterctl/cmd/platform/list/clusters.go @@ -5,10 +5,11 @@ import ( "time" "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/pkg/client" "github.com/loft-sh/log" "github.com/loft-sh/log/table" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"k8s.io/apimachinery/pkg/util/duration" @@ -19,13 +20,15 @@ type ClustersCmd struct { *flags.GlobalFlags log log.Logger + cfg *config.CLI } // NewClustersCmd creates a new spaces command -func NewClustersCmd(globalFlags *flags.GlobalFlags) *cobra.Command { +func NewClustersCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command { cmd := &ClustersCmd{ GlobalFlags: globalFlags, log: log.GetInstance(), + cfg: cfg, } description := product.ReplaceWithHeader("list clusters", ` List the vcluster platform clusters you have access to @@ -49,12 +52,12 @@ vcluster platform list clusters // RunClusters executes the functionality func (cmd *ClustersCmd) RunClusters(ctx context.Context) error { - baseClient, err := client.NewClientFromPath(cmd.Config) + platformClient, err := platform.NewClientFromConfig(ctx, cmd.cfg) if err != nil { return err } - managementClient, err := baseClient.Management() + managementClient, err := platformClient.Management() if err != nil { return err } diff --git a/cmd/vclusterctl/cmd/platform/list/list.go b/cmd/vclusterctl/cmd/platform/list/list.go index efe08962b..4e84dfa6e 100644 --- a/cmd/vclusterctl/cmd/platform/list/list.go +++ b/cmd/vclusterctl/cmd/platform/list/list.go @@ -3,12 +3,13 @@ package list import ( "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" ) // NewListCmd creates a new cobra command -func NewListCmd(globalFlags *flags.GlobalFlags) *cobra.Command { +func NewListCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command { description := product.ReplaceWithHeader("list", "") listCmd := &cobra.Command{ Use: "list", diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go index f942b555a..6e2aed704 100644 --- a/cmd/vclusterctl/cmd/platform/platform.go +++ b/cmd/vclusterctl/cmd/platform/platform.go @@ -35,7 +35,7 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) (*cobra.Com platformCmd.AddCommand(NewImportCmd(globalFlags)) platformCmd.AddCommand(get.NewGetCmd(globalFlags, cfg)) platformCmd.AddCommand(connect.NewConnectCmd(globalFlags)) - platformCmd.AddCommand(list.NewListCmd(globalFlags)) + platformCmd.AddCommand(list.NewListCmd(globalFlags, cfg)) return platformCmd, nil } diff --git a/cmd/vclusterctl/cmd/platform/reset.go b/cmd/vclusterctl/cmd/platform/reset.go index cd6d9d472..e215543fd 100644 --- a/cmd/vclusterctl/cmd/platform/reset.go +++ b/cmd/vclusterctl/cmd/platform/reset.go @@ -7,11 +7,11 @@ import ( "strings" storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/loftctl/v4/pkg/kube" "github.com/loft-sh/loftctl/v4/pkg/random" "github.com/loft-sh/log" "github.com/loft-sh/log/survey" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform/kube" "github.com/pkg/errors" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" diff --git a/pkg/platform/helper/helper.go b/pkg/platform/helper/helper.go new file mode 100644 index 000000000..3fc70b39b --- /dev/null +++ b/pkg/platform/helper/helper.go @@ -0,0 +1,1160 @@ +package helper + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + + "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" + "github.com/loft-sh/loftctl/v4/pkg/client/naming" + authorizationv1 "k8s.io/api/authorization/v1" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + 
"github.com/loft-sh/loftctl/v4/pkg/client" + "github.com/loft-sh/loftctl/v4/pkg/clihelper" + "github.com/loft-sh/loftctl/v4/pkg/kube" + "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/mgutz/ansi" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/util/term" +) + +var errNoClusterAccess = errors.New("the user has no access to any cluster") + +type VirtualClusterInstanceProject struct { + VirtualCluster *managementv1.VirtualClusterInstance + Project *managementv1.Project +} + +type SpaceInstanceProject struct { + SpaceInstance *managementv1.SpaceInstance + Project *managementv1.Project +} + +func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultVirtualClusterTemplate != "" { + templateName = projectTemplates.DefaultVirtualClusterTemplate + } + + // try to find template + if templateName != "" { + for _, virtualClusterTemplate := range projectTemplates.VirtualClusterTemplates { + if virtualClusterTemplate.Name == templateName { + return &virtualClusterTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed virtual cluster templates in project %s", projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 1 { + return &projectTemplates.VirtualClusterTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.VirtualClusterTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.VirtualClusterTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultSpaceTemplate != "" { + templateName = projectTemplates.DefaultSpaceTemplate + } + + // try to find template + if templateName != "" { + for _, spaceTemplate := range projectTemplates.SpaceTemplates { + if spaceTemplate.Name == templateName { + return &spaceTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", 
templateName, projectName)
+	} else if len(projectTemplates.SpaceTemplates) == 0 {
+		return nil, fmt.Errorf("there are no allowed space templates in project %s", projectName)
+	} else if len(projectTemplates.SpaceTemplates) == 1 {
+		return &projectTemplates.SpaceTemplates[0], nil
+	}
+
+	templateNames := []string{}
+	for _, template := range projectTemplates.SpaceTemplates {
+		templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName))
+	}
+	answer, err := log.Question(&survey.QuestionOptions{
+		Question:     "Please choose a template to use",
+		DefaultValue: templateNames[0],
+		Options:      templateNames,
+	})
+	if err != nil {
+		return nil, err
+	}
+	for _, template := range projectTemplates.SpaceTemplates {
+		if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) {
+			return &template, nil
+		}
+	}
+
+	return nil, fmt.Errorf("answer not found")
+}
+
+func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) {
+	if clusterName != "" || spaceName != "" {
+		virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log)
+		return cluster, "", space, virtualCluster, err
+	}
+
+	managementClient, err := baseClient.Management()
+	if err != nil {
+		return "", "", "", "", err
+	}
+
+	// gather projects and virtual cluster instances to access
+	var projects []*managementv1.Project
+	if projectName != "" {
+		project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{})
+		if err != nil {
+			if kerrors.IsNotFound(err) {
+				return "", "", "", "", fmt.Errorf("couldn't find or access project %s", projectName)
+			}
+
+			return "", "", "", "", err
+		}
+
+		projects = append(projects, project)
+	} else {
+		projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{})
+		if err != nil || len(projectsList.Items) == 0 {
+			virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log)
+			return cluster, "", space, virtualCluster, err
+		}
+
+		for _, p := range projectsList.Items {
+			proj := p
+			projects = append(projects, &proj)
+		}
+	}
+
+	// gather virtual cluster instances in those projects
+	var virtualClusters []*VirtualClusterInstanceProject
+	for _, p := range projects {
+		if virtualClusterName != "" {
+			virtualClusterInstance, err := getProjectVirtualClusterInstance(ctx, managementClient, p, virtualClusterName)
+			if err != nil {
+				continue
+			}
+
+			virtualClusters = append(virtualClusters, virtualClusterInstance)
+		} else {
+			projectVirtualClusters, err := getProjectVirtualClusterInstances(ctx, managementClient, p)
+			if err != nil {
+				continue
+			}
+
+			virtualClusters = append(virtualClusters, projectVirtualClusters...)
+ } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, virtualCluster := range virtualClusters { + optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) + } + + // check if there are virtualclusters + if len(virtualClusters) == 0 { + if virtualClusterName != "" { + return "", "", "", "", fmt.Errorf("couldn't find or access virtual cluster %s", virtualClusterName) + } + return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") + } else if len(virtualClusters) == 1 { + return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil + } + } + + return "", "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { + if clusterName != "" { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", err + } + + // gather projects and space instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var spaces []*SpaceInstanceProject + for _, p := range projects { + if spaceName != "" { + spaceInstance, err := getProjectSpaceInstance(ctx, managementClient, p, spaceName) + if err != nil { + continue + } + + spaces = append(spaces, spaceInstance) + } else { + projectSpaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + continue + } + + spaces = append(spaces, projectSpaceInstances...) 
+ } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, space := range spaces { + optionsUnformatted = append(optionsUnformatted, []string{"Space: " + clihelper.GetDisplayName(space.SpaceInstance.Name, space.SpaceInstance.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(space.Project.Name, space.Project.Spec.DisplayName)}) + } + + // check if there are spaces + if len(spaces) == 0 { + if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find or access space %s", spaceName) + } + return "", "", "", fmt.Errorf("couldn't find a space you have access to") + } else if len(spaces) == 1 { + return spaces[0].SpaceInstance.Spec.ClusterRef.Cluster, spaces[0].Project.Name, spaces[0].SpaceInstance.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return spaces[idx].SpaceInstance.Spec.ClusterRef.Cluster, spaces[idx].Project.Name, spaces[idx].SpaceInstance.Name, nil + } + } + + return "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { + if projectName != "" { + return clusterName, projectName, nil + } else if allowClusterOnly && clusterName != "" { + return clusterName, "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", "", err + } + + projectNames := []string{} + for _, project := range projectList.Items { + projectNames = append(projectNames, clihelper.GetDisplayName(project.Name, project.Spec.DisplayName)) + } + + if len(projectNames) == 0 { + cluster, err := SelectCluster(ctx, baseClient, log) + if err != nil { + if errors.Is(err, errNoClusterAccess) { + return "", "", fmt.Errorf("the user has no access to a project") + } + + return "", "", err + } + + return cluster, "", nil + } + + var selectedProject *managementv1.Project + if len(projectNames) == 1 { + selectedProject = &projectList.Items[0] + } else { + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a project to use", + DefaultValue: projectNames[0], + Options: projectNames, + }) + if err != nil { + return "", "", err + } + for idx, project := range projectList.Items { + if answer == clihelper.GetDisplayName(project.Name, project.Spec.DisplayName) { + selectedProject = &projectList.Items[idx] + } + } + if selectedProject == nil { + return "", "", fmt.Errorf("answer not found") + } + } + + if clusterName == "" { + clusterName, err = SelectProjectCluster(ctx, baseClient, selectedProject, log) + return clusterName, selectedProject.Name, err + } + + return clusterName, selectedProject.Name, nil +} + +// SelectCluster lets the user select a cluster +func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger) (string, error) { + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", err + 
} + + clusterNames := []string{} + for _, cluster := range clusterList.Items { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Items) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Items) == 1 { + return clusterList.Items[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + for _, cluster := range clusterList.Items { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectProjectCluster lets the user select a cluster from the project's allowed clusters +func SelectProjectCluster(ctx context.Context, baseClient client.Client, project *managementv1.Project, log log.Logger) (string, error) { + if !term.IsTerminal(os.Stdin) { + // Allow loft to schedule as before + return "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, project.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + anyClusterOption := "Any Cluster [Loft Selects Cluster]" + clusterNames := []string{} + for _, allowedCluster := range project.Spec.AllowedClusters { + if allowedCluster.Name == "*" { + clusterNames = append(clusterNames, anyClusterOption) + break + } + } + + for _, cluster := range clusterList.Clusters { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Clusters) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Clusters) == 1 { + return clusterList.Clusters[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + + if answer == anyClusterOption { + return "", nil + } + + for _, cluster := range clusterList.Clusters { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectUserOrTeam lets the user select an user or team in a cluster +func SelectUserOrTeam(ctx context.Context, baseClient client.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, nil, err + } + + clusterAccess, err := managementClient.Loft().ManagementV1().Clusters().ListAccess(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + + var user *clusterv1.EntityInfo + if len(clusterAccess.Users) > 0 { + user = &clusterAccess.Users[0].Info + } + + teams := []*clusterv1.EntityInfo{} + for _, team := range clusterAccess.Teams { + t := team + teams = append(teams, &t.Info) + } + + if user == nil && len(teams) == 0 { + return nil, nil, fmt.Errorf("the user has no access to cluster %s", clusterName) + } else if user != nil && len(teams) == 0 { + return user, nil, nil + } else if user == nil && len(teams) == 1 { + return nil, teams[0], nil + } + + names := []string{} + if user != nil { + names = append(names, "User "+clihelper.DisplayName(user)) + } + for _, t := range teams { 
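+		// the "User "/"Team " prefixes matter: the selected answer is matched
+		// against these exact strings below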
+ names = append(names, "Team "+clihelper.DisplayName(t)) + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team to use", + DefaultValue: names[0], + Options: names, + }) + if err != nil { + return nil, nil, err + } + + if user != nil && "User "+clihelper.DisplayName(user) == answer { + return user, nil, nil + } + for _, t := range teams { + if "Team "+clihelper.DisplayName(t) == answer { + return nil, t, nil + } + } + + return nil, nil, fmt.Errorf("answer not found") +} + +type ClusterUserOrTeam struct { + Team bool + ClusterMember managementv1.ClusterMember +} + +func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { + if userName != "" && teamName != "" { + return nil, fmt.Errorf("team and user specified, please only choose one") + } + + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + members, err := managementClient.Loft().ManagementV1().Clusters().ListMembers(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("retrieve cluster members: %w", err) + } + + matchedMembers := []ClusterUserOrTeam{} + optionsUnformatted := [][]string{} + for _, user := range members.Users { + if teamName != "" { + continue + } else if userName != "" && user.Info.Name != userName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + ClusterMember: user, + }) + displayName := user.Info.DisplayName + if displayName == "" { + displayName = user.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"User: " + displayName, "Kube User: " + user.Info.Name}) + } + for _, team := range members.Teams { + if userName != "" { + continue + } else if teamName != "" && team.Info.Name != teamName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + Team: true, + ClusterMember: team, + }) + displayName := team.Info.DisplayName + if displayName == "" { + displayName = team.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"Team: " + displayName, "Kube Team: " + team.Info.Name}) + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + if len(questionOptions) == 0 { + if userName == "" && teamName == "" { + return nil, fmt.Errorf("couldn't find any space") + } else if userName != "" { + return nil, fmt.Errorf("couldn't find user %s in cluster %s", ansi.Color(userName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return nil, fmt.Errorf("couldn't find team %s in cluster %s", ansi.Color(teamName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if len(questionOptions) == 1 { + return &matchedMembers[0], nil + } + + selectedMember, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return nil, err + } + + for idx, s := range questionOptions { + if s == selectedMember { + return &matchedMembers[idx], nil + } + } + + return nil, fmt.Errorf("selected question option not found") +} + +func GetVirtualClusterInstances(ctx context.Context, baseClient client.Client) ([]*VirtualClusterInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var 
retVClusters []*VirtualClusterInstanceProject + for _, project := range projectList.Items { + p := &project + + virtualClusterInstances, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retVClusters = append(retVClusters, virtualClusterInstances...) + } + + return retVClusters, nil +} + +func CanAccessProjectSecret(ctx context.Context, managementClient kube.Interface, namespace, name string) (bool, error) { + return CanAccessInstance(ctx, managementClient, namespace, name, "projectsecrets") +} + +func CanAccessInstance(ctx context.Context, managementClient kube.Interface, namespace, name string, resource string) (bool, error) { + selfSubjectAccessReview, err := managementClient.Loft().ManagementV1().SelfSubjectAccessReviews().Create(ctx, &managementv1.SelfSubjectAccessReview{ + Spec: managementv1.SelfSubjectAccessReviewSpec{ + SelfSubjectAccessReviewSpec: authorizationv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Verb: "use", + Group: managementv1.SchemeGroupVersion.Group, + Version: managementv1.SchemeGroupVersion.Version, + Resource: resource, + Namespace: namespace, + Name: name, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } else if !selfSubjectAccessReview.Status.Allowed || selfSubjectAccessReview.Status.Denied { + return false, nil + } + return true, nil +} + +func GetSpaceInstances(ctx context.Context, baseClient client.Client) ([]*SpaceInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retSpaces []*SpaceInstanceProject + for _, project := range projectList.Items { + p := &project + + spaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retSpaces = append(retSpaces, spaceInstances...) 
+ } + + return retSpaces, nil +} + +type ProjectProjectSecret struct { + ProjectSecret managementv1.ProjectSecret + Project string +} + +func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { + var projects []*managementv1.Project + if len(projectNames) == 0 { + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for idx := range projectList.Items { + projectItem := projectList.Items[idx] + projects = append(projects, &projectItem) + } + } else { + for _, projectName := range projectNames { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + projects = append(projects, project) + } + } + + var retSecrets []*ProjectProjectSecret + for _, project := range projects { + projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(naming.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, projectSecret := range projectSecrets.Items { + canAccess, err := CanAccessProjectSecret(ctx, managementClient, projectSecret.Namespace, projectSecret.Name) + if err != nil { + return nil, err + } else if !canAccess { + continue + } + + retSecrets = append(retSecrets, &ProjectProjectSecret{ + ProjectSecret: projectSecret, + Project: project.Name, + }) + } + } + + return retSecrets, nil +} + +type ClusterSpace struct { + clusterv1.Space + Cluster string +} + +// GetSpaces returns all spaces accessible by the user or team +func GetSpaces(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterSpace, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + spaceList := []ClusterSpace{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + spaces, err := clusterClient.Agent().ClusterV1().Spaces().List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving spaces from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, space := range spaces.Items { + spaceList = append(spaceList, ClusterSpace{ + Space: space, + Cluster: cluster.Name, + }) + } + } + sort.Slice(spaceList, func(i, j int) bool { + return spaceList[i].Name < spaceList[j].Name + }) + + return spaceList, nil +} + +type ClusterVirtualCluster struct { + clusterv1.VirtualCluster + Cluster string +} + +// GetVirtualClusters returns all virtual clusters the user has access to +func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterVirtualCluster, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + virtualClusterList := []ClusterVirtualCluster{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + virtualClusters, err := 
clusterClient.Agent().ClusterV1().VirtualClusters("").List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving virtual clusters from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, virtualCluster := range virtualClusters.Items { + virtualClusterList = append(virtualClusterList, ClusterVirtualCluster{ + VirtualCluster: virtualCluster, + Cluster: cluster.Name, + }) + } + } + sort.Slice(virtualClusterList, func(i, j int) bool { + return virtualClusterList[i].Name < virtualClusterList[j].Name + }) + + return virtualClusterList, nil +} + +// SelectSpaceAndClusterName selects a space and cluster name +func SelectSpaceAndClusterName(ctx context.Context, baseClient client.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { + spaces, err := GetSpaces(ctx, baseClient, log) + if err != nil { + return "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedSpaces := []ClusterSpace{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, space := range spaces { + if spaceName != "" && space.Space.Name != spaceName { + continue + } else if clusterName != "" && space.Cluster != clusterName { + continue + } else if len(matchedSpaces) > 20 { + break + } + + if isLoftContext && vCluster == "" && cluster == space.Cluster && namespace == space.Space.Name { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedSpaces = append(matchedSpaces, space) + spaceName := space.Space.Name + if space.Space.Annotations != nil && space.Space.Annotations["loft.sh/display-name"] != "" { + spaceName = space.Space.Annotations["loft.sh/display-name"] + " (" + spaceName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{spaceName, space.Cluster}) + } + + questionOptions := formatOptions("Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if spaceName == "" { + return "", "", fmt.Errorf("couldn't find any space") + } else if clusterName != "" { + return "", "", fmt.Errorf("couldn't find space %s in cluster %s", ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", fmt.Errorf("couldn't find space %s", ansi.Color(spaceName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedSpaces[0].Space.Name, matchedSpaces[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedSpaces[idx].Cluster + spaceName = matchedSpaces[idx].Space.Name + break + } + } + + return spaceName, clusterName, nil +} + +func GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*managementv1.UserInfo, *clusterv1.EntityInfo, error) { + self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("get self: %w", err) + } else if self.Status.User == nil && self.Status.Team == nil { + return nil, nil, fmt.Errorf("no user or team name returned") + } + + 
return self.Status.User, self.Status.Team, nil +} + +func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { + virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) + if err != nil { + return "", "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedVClusters := []ClusterVirtualCluster{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, virtualCluster := range virtualClusters { + if virtualClusterName != "" && virtualCluster.VirtualCluster.Name != virtualClusterName { + continue + } else if spaceName != "" && virtualCluster.VirtualCluster.Namespace != spaceName { + continue + } else if clusterName != "" && virtualCluster.Cluster != clusterName { + continue + } + + if isLoftContext && vCluster == virtualCluster.VirtualCluster.Name && cluster == virtualCluster.Cluster && namespace == virtualCluster.VirtualCluster.Namespace { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedVClusters = append(matchedVClusters, virtualCluster) + vClusterName := virtualCluster.VirtualCluster.Name + if virtualCluster.VirtualCluster.Annotations != nil && virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] != "" { + vClusterName = virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] + " (" + vClusterName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{vClusterName, virtualCluster.VirtualCluster.Namespace, virtualCluster.Cluster}) + } + + questionOptions := formatOptions("vCluster: %s | Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if virtualClusterName == "" { + return "", "", "", fmt.Errorf("couldn't find any virtual cluster") + } else if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if clusterName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", "", fmt.Errorf("couldn't find virtual cluster %s", ansi.Color(virtualClusterName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedVClusters[0].VirtualCluster.Name, matchedVClusters[0].VirtualCluster.Namespace, matchedVClusters[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster to use", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedVClusters[idx].Cluster + virtualClusterName = matchedVClusters[idx].VirtualCluster.Name + spaceName = matchedVClusters[idx].VirtualCluster.Namespace + break + } + } + + return virtualClusterName, spaceName, clusterName, nil +} + +func formatOptions(format string, options [][]string) []string { + if len(options) == 0 { + return []string{} + } + + columnLengths := make([]int, len(options[0])) + for _, row := range 
options { + for i, column := range row { + if len(column) > columnLengths[i] { + columnLengths[i] = len(column) + } + } + } + + retOptions := []string{} + for _, row := range options { + columns := []interface{}{} + for i := range row { + value := row[i] + if columnLengths[i] > len(value) { + value = value + strings.Repeat(" ", columnLengths[i]-len(value)) + } + + columns = append(columns, value) + } + + retOptions = append(retOptions, fmt.Sprintf(format, columns...)) + } + + return retOptions +} + +func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, spaceName string) (*SpaceInstanceProject, error) { + spaceInstance := &managementv1.SpaceInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(spaceName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstance) + if err != nil { + return nil, err + } + + if !spaceInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &SpaceInstanceProject{ + SpaceInstance: spaceInstance, + Project: project, + }, nil +} + +func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*SpaceInstanceProject, error) { + spaceInstanceList := &managementv1.SpaceInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstanceList) + if err != nil { + return nil, err + } + + var spaces []*SpaceInstanceProject + for _, spaceInstance := range spaceInstanceList.Items { + if !spaceInstance.Status.CanUse { + continue + } + + s := spaceInstance + spaces = append(spaces, &SpaceInstanceProject{ + SpaceInstance: &s, + Project: project, + }) + } + return spaces, nil +} + +func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, virtualClusterName string) (*VirtualClusterInstanceProject, error) { + virtualClusterInstance := &managementv1.VirtualClusterInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + Name(virtualClusterName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(virtualClusterInstance) + if err != nil { + return nil, err + } + + if !virtualClusterInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &VirtualClusterInstanceProject{ + VirtualCluster: virtualClusterInstance, + Project: project, + }, nil +} + +func getProjectVirtualClusterInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*VirtualClusterInstanceProject, error) { + virtualClusterInstanceList := &managementv1.VirtualClusterInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(naming.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). 
+ Into(virtualClusterInstanceList) + if err != nil { + return nil, err + } + + var virtualClusters []*VirtualClusterInstanceProject + for _, virtualClusterInstance := range virtualClusterInstanceList.Items { + if !virtualClusterInstance.Status.CanUse { + continue + } + + v := virtualClusterInstance + virtualClusters = append(virtualClusters, &VirtualClusterInstanceProject{ + VirtualCluster: &v, + Project: project, + }) + } + return virtualClusters, nil +} diff --git a/pkg/platform/kube/client.go b/pkg/platform/kube/client.go new file mode 100644 index 000000000..21699183b --- /dev/null +++ b/pkg/platform/kube/client.go @@ -0,0 +1,54 @@ +package kube + +import ( + agentloftclient "github.com/loft-sh/agentapi/v4/pkg/client/loft/clientset_generated/clientset" + loftclient "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + + "github.com/pkg/errors" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type Interface interface { + kubernetes.Interface + Loft() loftclient.Interface + Agent() agentloftclient.Interface +} + +func NewForConfig(c *rest.Config) (Interface, error) { + kubeClient, err := kubernetes.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kube client") + } + + loftClient, err := loftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create loft client") + } + + agentLoftClient, err := agentloftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kiosk client") + } + + return &client{ + Interface: kubeClient, + loftClient: loftClient, + agentLoftClient: agentLoftClient, + }, nil +} + +type client struct { + kubernetes.Interface + loftClient loftclient.Interface + agentLoftClient agentloftclient.Interface +} + +func (c *client) Loft() loftclient.Interface { + return c.loftClient +} + +func (c *client) Agent() agentloftclient.Interface { + return c.agentLoftClient +} diff --git a/pkg/platform/kubeconfig/kubeconfig.go b/pkg/platform/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..602ecf63e --- /dev/null +++ b/pkg/platform/kubeconfig/kubeconfig.go @@ -0,0 +1,266 @@ +package kubeconfig + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +type ContextOptions struct { + Name string + Server string + CaData []byte + ConfigPath string + InsecureSkipTLSVerify bool + DirectClusterEndpointEnabled bool + VirtualClusterAccessPointEnabled bool + + Token string + ClientKeyData []byte + ClientCertificateData []byte + + CurrentNamespace string + SetActive bool +} + +func SpaceInstanceContextName(projectName, spaceInstanceName string) string { + return "loft_" + spaceInstanceName + "_" + projectName +} + +func VirtualClusterInstanceContextName(projectName, virtualClusterInstance string) string { + return "loft-vcluster_" + virtualClusterInstance + "_" + projectName +} + +func virtualClusterInstanceProjectAndNameFromContextName(contextName string) (string, string) { + return strings.Split(contextName, "_")[2], strings.Split(contextName, "_")[1] +} + +func SpaceContextName(clusterName, namespaceName string) string { + contextName := "loft_" + if namespaceName != "" { + contextName += namespaceName + "_" + } + + contextName += clusterName + return contextName +} + +func VirtualClusterContextName(clusterName, namespaceName, virtualClusterName string) string { + return "loft-vcluster_" + virtualClusterName + "_" + 
namespaceName + "_" + clusterName +} + +func ManagementContextName() string { + return "loft-management" +} + +func ParseContext(contextName string) (isLoftContext bool, cluster string, namespace string, vCluster string) { + splitted := strings.Split(contextName, "_") + if len(splitted) == 0 || (splitted[0] != "loft" && splitted[0] != "loft-vcluster") { + return false, "", "", "" + } + + // cluster or space context + if splitted[0] == "loft" { + if len(splitted) > 3 || len(splitted) == 1 { + return false, "", "", "" + } else if len(splitted) == 2 { + return true, splitted[1], "", "" + } + + return true, splitted[2], splitted[1], "" + } + + // vCluster context + if len(splitted) != 4 { + return false, "", "", "" + } + + return true, splitted[3], splitted[2], splitted[1] +} + +func CurrentContext() (string, error) { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return "", err + } + + return config.CurrentContext, nil +} + +// DeleteContext deletes the context with the given name from the kube config +func DeleteContext(contextName string) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + delete(config.Contexts, contextName) + delete(config.Clusters, contextName) + delete(config.AuthInfos, contextName) + + if config.CurrentContext == contextName { + config.CurrentContext = "" + for name := range config.Contexts { + config.CurrentContext = name + break + } + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func updateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, setActive bool) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + + config.Contexts[contextName] = context + if setActive { + config.CurrentContext = contextName + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func printKubeConfigTo(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, writer io.Writer) error { + config := api.NewConfig() + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + + config.Contexts[contextName] = context + config.CurrentContext = contextName + + // set kind & version + config.APIVersion = "v1" + config.Kind = "Config" + + out, err := clientcmd.Write(*config) + if err != nil { + return err + } + + _, err = writer.Write(out) + return err +} + +// UpdateKubeConfig updates the kube config and adds the virtual cluster context +func UpdateKubeConfig(options ContextOptions) error { + contextName, cluster, authInfo, err := createContext(options) + if err != nil { + return err + } + + // we don't want to 
set the space name here as the default namespace in the virtual cluster, because it might not exist
+ return updateKubeConfig(contextName, cluster, authInfo, options.CurrentNamespace, options.SetActive)
+}
+
+// PrintKubeConfigTo prints the given config to the writer
+func PrintKubeConfigTo(options ContextOptions, writer io.Writer) error {
+ contextName, cluster, authInfo, err := createContext(options)
+ if err != nil {
+ return err
+ }
+
+ // we don't want to set the space name here as the default namespace in the virtual cluster, because it might not exist
+ return printKubeConfigTo(contextName, cluster, authInfo, options.CurrentNamespace, writer)
+}
+
+// PrintTokenKubeConfig writes the kube config to os.Stdout
+func PrintTokenKubeConfig(restConfig *rest.Config, token string) error {
+ contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+ return printKubeConfigTo(contextName, cluster, authInfo, "", os.Stdout)
+}
+
+// WriteTokenKubeConfig writes the kube config to the given io.Writer
+func WriteTokenKubeConfig(restConfig *rest.Config, token string, w io.Writer) error {
+ contextName, cluster, authInfo := createTokenContext(restConfig, token)
+
+ return printKubeConfigTo(contextName, cluster, authInfo, "", w)
+}
+
+func createTokenContext(restConfig *rest.Config, token string) (string, *api.Cluster, *api.AuthInfo) {
+ contextName := "default"
+
+ cluster := api.NewCluster()
+ cluster.Server = restConfig.Host
+ cluster.InsecureSkipTLSVerify = restConfig.Insecure
+ cluster.CertificateAuthority = restConfig.CAFile
+ cluster.CertificateAuthorityData = restConfig.CAData
+ cluster.TLSServerName = restConfig.ServerName
+
+ authInfo := api.NewAuthInfo()
+ authInfo.Token = token
+
+ return contextName, cluster, authInfo
+}
+
+func createContext(options ContextOptions) (string, *api.Cluster, *api.AuthInfo, error) {
+ contextName := options.Name
+ cluster := api.NewCluster()
+ cluster.Server = options.Server
+ cluster.CertificateAuthorityData = options.CaData
+ cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify
+
+ authInfo := api.NewAuthInfo()
+ if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil {
+ authInfo.Token = options.Token
+ authInfo.ClientKeyData = options.ClientKeyData
+ authInfo.ClientCertificateData = options.ClientCertificateData
+ } else {
+ command, err := os.Executable()
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ absConfigPath, err := filepath.Abs(options.ConfigPath)
+ if err != nil {
+ return "", nil, nil, err
+ }
+
+ if options.VirtualClusterAccessPointEnabled {
+ projectName, virtualClusterName := virtualClusterInstanceProjectAndNameFromContextName(contextName)
+ authInfo.Exec = &api.ExecConfig{
+ APIVersion: v1beta1.SchemeGroupVersion.String(),
+ Command: command,
+ Args: []string{"token", "--silent", "--project", projectName, "--virtual-cluster", virtualClusterName},
+ }
+ } else {
+ authInfo.Exec = &api.ExecConfig{
+ APIVersion: v1beta1.SchemeGroupVersion.String(),
+ Command: command,
+ Args: []string{"token", "--silent", "--config", absConfigPath},
+ }
+ if options.DirectClusterEndpointEnabled {
+ authInfo.Exec.Args = append(authInfo.Exec.Args, "--direct-cluster-endpoint")
+ }
+ }
+ }
+
+ return contextName, cluster, authInfo, nil
+}
diff --git a/pkg/platform/platformclihelper/clihelper.go b/pkg/platform/platformclihelper/clihelper.go
new file mode 100644
index 000000000..5351e4235
--- /dev/null
+++ b/pkg/platform/platformclihelper/clihelper.go
@@ -0,0 +1,774 @@
+package clihelper
+
+import (
+
"context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "sort" + "strconv" + "strings" + "time" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/httputil" + "github.com/sirupsen/logrus" + + jsonpatch "github.com/evanphx/json-patch" + loftclientset "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/portforward" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" +) + +// CriticalStatus container status +var CriticalStatus = map[string]bool{ + "Error": true, + "Unknown": true, + "ImagePullBackOff": true, + "CrashLoopBackOff": true, + "RunContainerError": true, + "ErrImagePull": true, + "CreateContainerConfigError": true, + "InvalidImageName": true, +} + +const defaultReleaseName = "loft" + +const LoftRouterDomainSecret = "loft-router-domain" + +var defaultDeploymentName = "loft" + +func GetDisplayName(name string, displayName string) string { + if displayName != "" { + return displayName + } + + return name +} + +func GetTableDisplayName(name string, displayName string) string { + if displayName != "" && displayName != name { + return displayName + " (" + name + ")" + } + + return name +} + +func DisplayName(entityInfo *clusterv1.EntityInfo) string { + if entityInfo == nil { + return "" + } else if entityInfo.DisplayName != "" { + return entityInfo.DisplayName + } else if entityInfo.Username != "" { + return entityInfo.Username + } + + return entityInfo.Name +} + +func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + return "", err + } else { + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil + } + } + } else { + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil + } + } + + return "", fmt.Errorf("couldn't find any host in loft ingress '%s/loft-ingress', please make sure you have not changed any deployed resources", namespace) +} + +func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, namespace string, log log.Logger) (*corev1.Pod, error) { + // wait until we have a running loft pod + now := time.Now() + pod := &corev1.Pod{} + err := wait.PollUntilContextTimeout(ctx, time.Second*2, config.Timeout(), true, func(ctx context.Context) (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "app=loft", + }) + if err != nil { + log.Warnf("Error trying to retrieve %s pod: %v", product.DisplayName(), err) + return false, nil + } else if len(pods.Items) == 0 { + if time.Now().After(now.Add(time.Second * 10)) { + log.Infof("Still 
waiting for a %s pod...", product.DisplayName()) + now = time.Now() + } + return false, nil + } + + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) + + loftPod := &pods.Items[0] + found := false + for _, containerStatus := range loftPod.Status.ContainerStatuses { + if containerStatus.State.Running != nil && containerStatus.Ready { + if containerStatus.Name == "manager" { + found = true + } + + continue + } else if containerStatus.State.Terminated != nil || (containerStatus.State.Waiting != nil && CriticalStatus[containerStatus.State.Waiting.Reason]) { + reason := "" + message := "" + if containerStatus.State.Terminated != nil { + reason = containerStatus.State.Terminated.Reason + message = containerStatus.State.Terminated.Message + } else if containerStatus.State.Waiting != nil { + reason = containerStatus.State.Waiting.Reason + message = containerStatus.State.Waiting.Message + } + + out, err := kubeClient.CoreV1().Pods(namespace).GetLogs(loftPod.Name, &corev1.PodLogOptions{ + Container: "manager", + }).Do(context.Background()).Raw() + if err != nil { + return false, fmt.Errorf("there seems to be an issue with %s starting up: %s (%s). Please reach out to our support at https://loft.sh/", product.DisplayName(), message, reason) + } + if strings.Contains(string(out), "register instance: Post \"https://license.loft.sh/register\": dial tcp") { + return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up. Looks like you try to install %[1]s into an air-gapped environment, please reach out to our support at https://loft.sh/ for an offline license", product.DisplayName(), string(out)) + } + + return false, fmt.Errorf("%[1]s logs: \n%v \nThere seems to be an issue with %[1]s starting up: %[2]s (%[3]s). Please reach out to our support at https://loft.sh/", product.DisplayName(), string(out), message, reason) + } else if containerStatus.State.Waiting != nil && time.Now().After(now.Add(time.Second*10)) { + if containerStatus.State.Waiting.Message != "" { + log.Infof("Please keep waiting, %s container is still starting up: %s (%s)", product.DisplayName(), containerStatus.State.Waiting.Message, containerStatus.State.Waiting.Reason) + } else if containerStatus.State.Waiting.Reason != "" { + log.Infof("Please keep waiting, %s container is still starting up: %s", product.DisplayName(), containerStatus.State.Waiting.Reason) + } else { + log.Infof("Please keep waiting, %s container is still starting up...", product.DisplayName()) + } + + now = time.Now() + } + + return false, nil + } + + pod = loftPod + return found, nil + }) + if err != nil { + return nil, err + } + + return pod, nil +} + +func StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) { + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Starting port-forwarding to the %s pod", product.DisplayName()) + execRequest := client.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). 
+ SubResource("portforward")
+
+ t, upgrader, err := spdy.RoundTripperFor(config)
+ if err != nil {
+ return nil, err
+ }
+
+ dialer := spdy.NewDialer(upgrader, &http.Client{Transport: t}, "POST", execRequest.URL())
+ errChan := make(chan error)
+ readyChan := make(chan struct{})
+ stopChan := make(chan struct{})
+ targetPort := getPortForwardingTargetPort(pod)
+ forwarder, err := portforward.New(dialer, []string{localPort + ":" + strconv.Itoa(targetPort)}, stopChan, readyChan, errChan, io.Discard, io.Discard)
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ err := forwarder.ForwardPorts(ctx)
+ if err != nil {
+ errChan <- err
+ }
+ }()
+
+ // wait till ready
+ select {
+ case err = <-errChan:
+ return nil, err
+ case <-readyChan:
+ case <-stopChan:
+ return nil, fmt.Errorf("stopped before ready")
+ }
+
+ // start watcher
+ go func() {
+ for {
+ select {
+ case <-stopChan:
+ return
+ case err = <-errChan:
+ log.Infof("error during port forwarding: %v", err)
+ close(stopChan)
+ return
+ }
+ }
+ }()
+
+ return stopChan, nil
+}
+
+func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) {
+ loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
+ if err != nil {
+ if kerrors.IsNotFound(err) {
+ loftNamespace, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }, metav1.CreateOptions{})
+ if err != nil {
+ return "", err
+ }
+
+ return string(loftNamespace.UID), nil
+ }
+
+ return "", err
+ }
+
+ return string(loftNamespace.UID), nil
+}
+
+type version struct {
+ Version string `json:"version"`
+}
+
+func IsLoftReachable(ctx context.Context, host string) (bool, error) {
+ // wait until loft is reachable at the given url
+ client := &http.Client{
+ Transport: httputil.InsecureTransport(),
+ }
+ url := "https://" + host + "/version"
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
+ if err != nil {
+ return false, fmt.Errorf("error creating request with context: %w", err)
+ }
+ resp, err := client.Do(req)
+ if err == nil && resp.StatusCode == http.StatusOK {
+ out, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return false, nil
+ }
+
+ v := &version{}
+ err = json.Unmarshal(out, v)
+ if err != nil {
+ return false, fmt.Errorf("error decoding response from %s: %w. Try running '%s --reset'", url, err, product.StartCmd())
+ } else if v.Version == "" {
+ return false, fmt.Errorf("unexpected response from %s: %s.
Try running '%s --reset'", url, string(out), product.StartCmd()) + } + + return true, nil + } + + return false, nil +} + +func IsLocalCluster(host string, log log.Logger) bool { + url, err := url.Parse(host) + if err != nil { + log.Warnf("Couldn't parse kube context host url: %v", err) + return false + } + + hostname := url.Hostname() + ip := net.ParseIP(hostname) + if ip != nil { + if IsPrivateIP(ip) { + return true + } + } + + if hostname == "localhost" || strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".localhost") { + return true + } + + return false +} + +var privateIPBlocks []*net.IPNet + +func init() { + for _, cidr := range []string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "::1/128", // IPv6 loopback + "fe80::/10", // IPv6 link-local + "fc00::/7", // IPv6 unique local addr + } { + _, block, _ := net.ParseCIDR(cidr) + privateIPBlocks = append(privateIPBlocks, block) + } +} + +// IsPrivateIP checks if a given ip is private +func IsPrivateIP(ip net.IP) bool { + for _, block := range privateIPBlocks { + if block.Contains(ip) { + return true + } + } + + return false +} + +func EnterHostNameQuestion(log log.Logger) (string, error) { + return log.Question(&survey.QuestionOptions{ + Question: fmt.Sprintf("Enter a hostname for your %s instance (e.g. loft.my-domain.tld): \n ", product.DisplayName()), + ValidationFunc: func(answer string) error { + u, err := url.Parse("https://" + answer) + if err != nil || u.Path != "" || u.Port() != "" || len(strings.Split(answer, ".")) < 2 { + return fmt.Errorf("please enter a valid hostname without protocol (https://), without path and without port, e.g. loft.my-domain.tld") + } + return nil + }, + }) +} + +func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { + _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, fmt.Errorf("error accessing kubernetes cluster: %w", err) + } + + return true, nil +} + +func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { + log.Infof("Uninstalling %s...", product.DisplayName()) + releaseName := defaultReleaseName + deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } else if deploy != nil && deploy.Labels != nil && deploy.Labels["release"] != "" { + releaseName = deploy.Labels["release"] + } + + args := []string{ + "uninstall", + releaseName, + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + log.Infof("Executing command: helm %s", strings.Join(args, " ")) + output, err := exec.Command("helm", args...).CombinedOutput() + if err != nil { + log.Errorf("error during helm command: %s (%v)", string(output), err) + } + + // we also cleanup the validating webhook configuration and apiservice + apiRegistrationClient, err := clientset.NewForConfig(restConfig) + if err != nil { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.management.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = deleteUser(ctx, restConfig, "admin") + if err != nil { + return err + } + + err = 
kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), "loft-user-secret-admin", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), LoftRouterDomainSecret, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + // we also cleanup the validating webhook configuration and apiservice + err = kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "loft-agent", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.tenancy.kiosk.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.cluster.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-agent-controller", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-applied-defaults", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + log.WriteString(logrus.InfoLevel, "\n") + log.Done(product.Replace("Successfully uninstalled Loft")) + log.WriteString(logrus.InfoLevel, "\n") + + return nil +} + +func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return err + } + + user, err := loftClient.StorageV1().Users().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil + } else if len(user.Finalizers) > 0 { + user.Finalizers = nil + _, err = loftClient.StorageV1().Users().Update(ctx, user, metav1.UpdateOptions{}) + if err != nil { + if kerrors.IsConflict(err) { + return deleteUser(ctx, restConfig, name) + } + + return err + } + } + + err = loftClient.StorageV1().Users().Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + return nil +} + +func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error { + // first create an ingress controller + const ( + YesOption = "Yes" + NoOption = "No, I already have an ingress controller installed." + ) + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Ingress controller required. Should the nginx-ingress controller be installed?", + DefaultValue: YesOption, + Options: []string{ + YesOption, + NoOption, + }, + }) + if err != nil { + return err + } + + if answer == YesOption { + args := []string{ + "install", + "ingress-nginx", + "ingress-nginx", + "--repository-config=''", + "--repo", + "https://kubernetes.github.io/ingress-nginx", + "--kube-context", + kubeContext, + "--namespace", + "ingress-nginx", + "--create-namespace", + "--set-string", + "controller.config.hsts=false", + "--wait", + } + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for ingress controller deployment, this can take several minutes...") + helmCmd := exec.Command("helm", args...) 
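+ // CombinedOutput blocks until helm exits (the --wait flag above keeps
+ // helm running until the ingress-nginx deployment is ready) and captures
+ // stdout and stderr together so the full error text can be returned.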
+ output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + list, err := kubeClient.CoreV1().Secrets("ingress-nginx").List(ctx, metav1.ListOptions{ + LabelSelector: "name=ingress-nginx,owner=helm,status=deployed", + }) + if err != nil { + return err + } + + if len(list.Items) == 1 { + secret := list.Items[0] + originalSecret := secret.DeepCopy() + secret.Labels["loft.sh/app"] = "true" + if secret.Annotations == nil { + secret.Annotations = map[string]string{} + } + + secret.Annotations["loft.sh/url"] = "https://kubernetes.github.io/ingress-nginx" + originalJSON, err := json.Marshal(originalSecret) + if err != nil { + return err + } + modifiedJSON, err := json.Marshal(secret) + if err != nil { + return err + } + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return err + } + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, types.MergePatchType, data, metav1.PatchOptions{}) + if err != nil { + return err + } + } + + log.Done("Successfully installed ingress-nginx to your kubernetes cluster!") + } + + return nil +} + +func UpgradeLoft(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, log log.Logger) error { + // now we install loft + args := []string{ + "upgrade", + defaultReleaseName, + chartName, + "--install", + "--reuse-values", + "--create-namespace", + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for helm command, this can take up to several minutes...") + helmCmd := exec.Command("helm", args...) + if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + log.Donef("%s has been deployed to your cluster!", product.DisplayName()) + return nil +} + +func GetLoftManifests(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, _ log.Logger) (string, error) { + args := []string{ + "template", + defaultReleaseName, + chartName, + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + helmCmd := exec.Command("helm", args...) 
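+ // As in UpgradeLoft, a remote chart repo means helm must not run from a
+ // directory containing a folder named after the chart (see getHelmWorkdir).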
+ if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return "", err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + return string(output), nil +} + +// Return the directory where the `helm` commands should be executed or error if none can be found/created +// Uses current workdir by default unless it contains a folder with the chart name +func getHelmWorkdir(chartName string) (string, error) { + // If chartName folder exists, check temp dir next + if _, err := os.Stat(chartName); err == nil { + tempDir := os.TempDir() + + // If tempDir/chartName folder exists, create temp folder + if _, err := os.Stat(path.Join(tempDir, chartName)); err == nil { + tempDir, err = os.MkdirTemp(tempDir, chartName) + if err != nil { + return "", errors.New("problematic directory `" + chartName + "` found: please execute command in a different folder") + } + } + + // Use tempDir + return tempDir, nil + } + + // Use current workdir + return "", nil +} + +// Makes sure that admin user and password secret exists +// Returns (true, nil) if everything is correct but password is different from parameter `password` +func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return false, err + } + + admin, err := loftClient.StorageV1().Users().Get(ctx, "admin", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if admin == nil { + admin, err = loftClient.StorageV1().Users().Create(ctx, &storagev1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin", + }, + Spec: storagev1.UserSpec{ + Username: "admin", + Email: "test@domain.tld", + Subject: "admin", + Groups: []string{"system:masters"}, + PasswordRef: &storagev1.SecretRef{ + SecretName: "loft-user-secret-admin", + SecretNamespace: "loft", + Key: "password", + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } + } else if admin.Spec.PasswordRef == nil || admin.Spec.PasswordRef.SecretName == "" || admin.Spec.PasswordRef.SecretNamespace == "" { + return false, nil + } + + key := admin.Spec.PasswordRef.Key + if key == "" { + key = "password" + } + + passwordHash := fmt.Sprintf("%x", sha256.Sum256([]byte(password))) + + secret, err := kubeClient.CoreV1().Secrets(admin.Spec.PasswordRef.SecretNamespace).Get(ctx, admin.Spec.PasswordRef.SecretName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if err == nil { + existingPasswordHash, keyExists := secret.Data[key] + if keyExists { + return (string(existingPasswordHash) != passwordHash), nil + } + + secret.Data[key] = []byte(passwordHash) + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + if err != nil { + return false, errors.Wrap(err, "update admin password secret") + } + return false, nil + } + + // create the password secret if it was not found, this can happen if you delete the loft namespace without deleting the admin user + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: admin.Spec.PasswordRef.SecretName, + Namespace: admin.Spec.PasswordRef.SecretNamespace, + }, + Data: map[string][]byte{ + key: []byte(passwordHash), + }, + } + _, err = 
kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + if err != nil { + return false, errors.Wrap(err, "create admin password secret") + } + + log.Info("Successfully recreated admin password secret") + return false, nil +} + +func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool { + _, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + _, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + return kerrors.IsNotFound(err) + } + + return kerrors.IsNotFound(err) +} + +func getPortForwardingTargetPort(pod *corev1.Pod) int { + for _, container := range pod.Spec.Containers { + if container.Name == "manager" { + for _, port := range container.Ports { + if port.Name == "https" { + return int(port.ContainerPort) + } + } + } + } + + return 10443 +} From 870cb8d430576dda08b0d658bb7f211d52d2ac43 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 15:55:44 +0200 Subject: [PATCH 10/17] fixed some imports --- pkg/platform/helper/helper.go | 50 +++++++++++++++++------------------ 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/pkg/platform/helper/helper.go b/pkg/platform/helper/helper.go index 3fc70b39b..1775ef5bd 100644 --- a/pkg/platform/helper/helper.go +++ b/pkg/platform/helper/helper.go @@ -9,17 +9,17 @@ import ( "strings" "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" - "github.com/loft-sh/loftctl/v4/pkg/client/naming" authorizationv1 "k8s.io/api/authorization/v1" clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - "github.com/loft-sh/loftctl/v4/pkg/client" - "github.com/loft-sh/loftctl/v4/pkg/clihelper" - "github.com/loft-sh/loftctl/v4/pkg/kube" - "github.com/loft-sh/loftctl/v4/pkg/kubeconfig" "github.com/loft-sh/log" "github.com/loft-sh/log/survey" + "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/kube" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" + clihelper "github.com/loft-sh/vcluster/pkg/platform/platformclihelper" + "github.com/loft-sh/vcluster/pkg/projectutil" "github.com/mgutz/ansi" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -38,7 +38,7 @@ type SpaceInstanceProject struct { Project *managementv1.Project } -func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { +func SelectVirtualClusterTemplate(ctx context.Context, baseClient platform.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -90,7 +90,7 @@ func SelectVirtualClusterTemplate(ctx context.Context, baseClient client.Client, return nil, fmt.Errorf("answer not found") } -func SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { +func SelectSpaceTemplate(ctx context.Context, baseClient platform.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -142,7 +142,7 @@ func 
SelectSpaceTemplate(ctx context.Context, baseClient client.Client, projectN return nil, fmt.Errorf("answer not found") } -func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { +func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient platform.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { if clusterName != "" || spaceName != "" { virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) return cluster, "", space, virtualCluster, err @@ -234,7 +234,7 @@ func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClien return "", "", "", "", fmt.Errorf("couldn't find answer") } -func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { +func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient platform.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { if clusterName != "" { space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) return cluster, "", space, err @@ -326,7 +326,7 @@ func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient client.Client, s return "", "", "", fmt.Errorf("couldn't find answer") } -func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { +func SelectProjectOrCluster(ctx context.Context, baseClient platform.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { if projectName != "" { return clusterName, projectName, nil } else if allowClusterOnly && clusterName != "" { @@ -392,7 +392,7 @@ func SelectProjectOrCluster(ctx context.Context, baseClient client.Client, clust } // SelectCluster lets the user select a cluster -func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger) (string, error) { +func SelectCluster(ctx context.Context, baseClient platform.Client, log log.Logger) (string, error) { managementClient, err := baseClient.Management() if err != nil { return "", err @@ -431,7 +431,7 @@ func SelectCluster(ctx context.Context, baseClient client.Client, log log.Logger } // SelectProjectCluster lets the user select a cluster from the project's allowed clusters -func SelectProjectCluster(ctx context.Context, baseClient client.Client, project *managementv1.Project, log log.Logger) (string, error) { +func SelectProjectCluster(ctx context.Context, baseClient platform.Client, project *managementv1.Project, log log.Logger) (string, error) { if !term.IsTerminal(os.Stdin) { // Allow loft to schedule as before return "", nil @@ -488,7 +488,7 @@ func SelectProjectCluster(ctx context.Context, baseClient client.Client, project } // SelectUserOrTeam lets the user select an user or team in a cluster -func SelectUserOrTeam(ctx context.Context, baseClient client.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { +func SelectUserOrTeam(ctx context.Context, baseClient platform.Client, clusterName string, log log.Logger) 
(*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { managementClient, err := baseClient.Management() if err != nil { return nil, nil, err @@ -552,7 +552,7 @@ type ClusterUserOrTeam struct { ClusterMember managementv1.ClusterMember } -func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { +func SelectClusterUserOrTeam(ctx context.Context, baseClient platform.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { if userName != "" && teamName != "" { return nil, fmt.Errorf("team and user specified, please only choose one") } @@ -636,7 +636,7 @@ func SelectClusterUserOrTeam(ctx context.Context, baseClient client.Client, clus return nil, fmt.Errorf("selected question option not found") } -func GetVirtualClusterInstances(ctx context.Context, baseClient client.Client) ([]*VirtualClusterInstanceProject, error) { +func GetVirtualClusterInstances(ctx context.Context, baseClient platform.Client) ([]*VirtualClusterInstanceProject, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -689,7 +689,7 @@ func CanAccessInstance(ctx context.Context, managementClient kube.Interface, nam return true, nil } -func GetSpaceInstances(ctx context.Context, baseClient client.Client) ([]*SpaceInstanceProject, error) { +func GetSpaceInstances(ctx context.Context, baseClient platform.Client) ([]*SpaceInstanceProject, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -745,7 +745,7 @@ func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, pro var retSecrets []*ProjectProjectSecret for _, project := range projects { - projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(naming.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) + projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(projectutil.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } @@ -774,7 +774,7 @@ type ClusterSpace struct { } // GetSpaces returns all spaces accessible by the user or team -func GetSpaces(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterSpace, error) { +func GetSpaces(ctx context.Context, baseClient platform.Client, log log.Logger) ([]ClusterSpace, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -822,7 +822,7 @@ type ClusterVirtualCluster struct { } // GetVirtualClusters returns all virtual clusters the user has access to -func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.Logger) ([]ClusterVirtualCluster, error) { +func GetVirtualClusters(ctx context.Context, baseClient platform.Client, log log.Logger) ([]ClusterVirtualCluster, error) { managementClient, err := baseClient.Management() if err != nil { return nil, err @@ -865,7 +865,7 @@ func GetVirtualClusters(ctx context.Context, baseClient client.Client, log log.L } // SelectSpaceAndClusterName selects a space and cluster name -func SelectSpaceAndClusterName(ctx context.Context, baseClient client.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { +func SelectSpaceAndClusterName(ctx context.Context, baseClient platform.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { spaces, err := GetSpaces(ctx, baseClient, log) if err != nil { return "", "", err @@ -946,7 +946,7 @@ func 
GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*mana return self.Status.User, self.Status.Team, nil } -func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient client.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { +func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient platform.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) if err != nil { return "", "", "", err @@ -1056,7 +1056,7 @@ func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interfac err := managementClient.Loft().ManagementV1().RESTClient(). Get(). Resource("spaceinstances"). - Namespace(naming.ProjectNamespace(project.Name)). + Namespace(projectutil.ProjectNamespace(project.Name)). Name(spaceName). VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). Param("extended", "true"). @@ -1081,7 +1081,7 @@ func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interfa err := managementClient.Loft().ManagementV1().RESTClient(). Get(). Resource("spaceinstances"). - Namespace(naming.ProjectNamespace(project.Name)). + Namespace(projectutil.ProjectNamespace(project.Name)). VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). Param("extended", "true"). Do(ctx). @@ -1110,7 +1110,7 @@ func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube err := managementClient.Loft().ManagementV1().RESTClient(). Get(). Resource("virtualclusterinstances"). - Namespace(naming.ProjectNamespace(project.Name)). + Namespace(projectutil.ProjectNamespace(project.Name)). Name(virtualClusterName). VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). Param("extended", "true"). @@ -1135,7 +1135,7 @@ func getProjectVirtualClusterInstances(ctx context.Context, managementClient kub err := managementClient.Loft().ManagementV1().RESTClient(). Get(). Resource("virtualclusterinstances"). - Namespace(naming.ProjectNamespace(project.Name)). + Namespace(projectutil.ProjectNamespace(project.Name)). VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). Param("extended", "true"). Do(ctx). 
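Note: the hunks above all make the same two substitutions, leaving the helper
package with one uniform calling convention: accept the new platform.Client
instead of the loftctl client.Client, derive a management client from it, and
resolve project namespaces through projectutil.ProjectNamespace rather than
the old naming package. Below is a minimal sketch of that shape, not part of
the patch: the function name listProjectSecrets and the use of the list's
Items field are illustrative, and the managementv1 import path is assumed by
analogy with the storage/v1 path used elsewhere in this series; the other
import paths are the ones shown in the hunks.

    package helper

    import (
    	"context"

    	managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    	"github.com/loft-sh/vcluster/pkg/platform"
    	"github.com/loft-sh/vcluster/pkg/projectutil"
    )

    // listProjectSecrets mirrors the refactored helpers: platform.Client in,
    // management client derived from it, project namespace via projectutil.
    func listProjectSecrets(ctx context.Context, baseClient platform.Client, project *managementv1.Project) ([]managementv1.ProjectSecret, error) {
    	managementClient, err := baseClient.Management()
    	if err != nil {
    		return nil, err
    	}

    	// projectutil.ProjectNamespace replaces naming.ProjectNamespace here.
    	secrets, err := managementClient.Loft().ManagementV1().
    		ProjectSecrets(projectutil.ProjectNamespace(project.Name)).
    		List(ctx, metav1.ListOptions{})
    	if err != nil {
    		return nil, err
    	}
    	return secrets.Items, nil
    }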
From 6f98fc4b6ad405e14bb882b883307731c601e059 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 15:59:17 +0200 Subject: [PATCH 11/17] fix --- cmd/vclusterctl/cmd/platform/list/list.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/vclusterctl/cmd/platform/list/list.go b/cmd/vclusterctl/cmd/platform/list/list.go index 4e84dfa6e..d23168cb2 100644 --- a/cmd/vclusterctl/cmd/platform/list/list.go +++ b/cmd/vclusterctl/cmd/platform/list/list.go @@ -19,6 +19,6 @@ func NewListCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command } // TODO: change that with the actual globalFlag variable - listCmd.AddCommand(NewClustersCmd(globalFlags)) + listCmd.AddCommand(NewClustersCmd(globalFlags, cfg)) return listCmd } From 243e43e3bf2a2d8843301acc014215ee0da032a2 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 16:03:49 +0200 Subject: [PATCH 12/17] fix linting --- pkg/platform/platformclihelper/clihelper.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/platform/platformclihelper/clihelper.go b/pkg/platform/platformclihelper/clihelper.go index 5351e4235..ec8eea52c 100644 --- a/pkg/platform/platformclihelper/clihelper.go +++ b/pkg/platform/platformclihelper/clihelper.go @@ -93,11 +93,10 @@ func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, na ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) if err != nil { return "", err - } else { - // find host - for _, rule := range ingress.Spec.Rules { - return rule.Host, nil - } + } + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil } } else { // find host From 4df1481f3d6e15f98da10ec037695380b6be6399 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 16:10:08 +0200 Subject: [PATCH 13/17] removed cli reset --- pkg/cli/reset/password.go | 180 -------------------------------------- pkg/cli/reset/reset.go | 21 ----- 2 files changed, 201 deletions(-) delete mode 100644 pkg/cli/reset/password.go delete mode 100644 pkg/cli/reset/reset.go diff --git a/pkg/cli/reset/password.go b/pkg/cli/reset/password.go deleted file mode 100644 index 43f928014..000000000 --- a/pkg/cli/reset/password.go +++ /dev/null @@ -1,180 +0,0 @@ -package reset - -import ( - "context" - "crypto/sha256" - "fmt" - "strings" - - storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/loftctl/v4/pkg/kube" - "github.com/loft-sh/loftctl/v4/pkg/random" - "github.com/loft-sh/log" - "github.com/loft-sh/log/survey" - "github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/pkg/errors" - "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" -) - -// PasswordCmd holds the lags -type PasswordCmd struct { - *flags.GlobalFlags - - User string - Password string - Create bool - Force bool - - Log log.Logger -} - -// NewPasswordCmd creates a new command -func NewPasswordCmd(globalFlags *flags.GlobalFlags) *cobra.Command { - cmd := &PasswordCmd{ - GlobalFlags: globalFlags, - Log: log.GetInstance(), - } - description := product.ReplaceWithHeader("reset password", ` -Resets the password of a user. 
- -Example: -loft reset password -loft reset password --user admin -####################################################### - `) - c := &cobra.Command{ - Use: "password", - Short: "Resets the password of a user", - Long: description, - Args: cobra.NoArgs, - RunE: func(_ *cobra.Command, _ []string) error { - return cmd.Run() - }, - } - - c.Flags().StringVar(&cmd.User, "user", "admin", "The name of the user to reset the password") - c.Flags().StringVar(&cmd.Password, "password", "", "The new password to use") - c.Flags().BoolVar(&cmd.Create, "create", false, "Creates the user if it does not exist") - c.Flags().BoolVar(&cmd.Force, "force", false, "If user had no password will create one") - return c -} - -// Run executes the functionality -func (cmd *PasswordCmd) Run() error { - restConfig, err := ctrl.GetConfig() - if err != nil { - return errors.Wrap(err, "get kube config") - } - - managementClient, err := kube.NewForConfig(restConfig) - if err != nil { - return err - } - - // get user - cmd.Log.Infof("Resetting password of user %s", cmd.User) - user, err := managementClient.Loft().StorageV1().Users().Get(context.Background(), cmd.User, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return errors.Wrap(err, "get user") - } else if kerrors.IsNotFound(err) { - // create user - if !cmd.Create { - return fmt.Errorf("user %s was not found, run with '--create' to create this user automatically", cmd.User) - } - - user, err = managementClient.Loft().StorageV1().Users().Create(context.Background(), &storagev1.User{ - ObjectMeta: metav1.ObjectMeta{ - Name: cmd.User, - }, - Spec: storagev1.UserSpec{ - Username: cmd.User, - Subject: cmd.User, - Groups: []string{ - "system:masters", - }, - PasswordRef: &storagev1.SecretRef{ - SecretName: "loft-password-" + random.RandomString(5), - SecretNamespace: "loft", - Key: "password", - }, - }, - }, metav1.CreateOptions{}) - if err != nil { - return err - } - } - - // check if user had a password before - if user.Spec.PasswordRef == nil || user.Spec.PasswordRef.SecretName == "" || user.Spec.PasswordRef.SecretNamespace == "" || user.Spec.PasswordRef.Key == "" { - if !cmd.Force { - return fmt.Errorf("user %s had no password. 
If you want to force password creation, please run with the '--force' flag", cmd.User) - } - - user.Spec.PasswordRef = &storagev1.SecretRef{ - SecretName: "loft-password-" + random.RandomString(5), - SecretNamespace: "loft", - Key: "password", - } - user, err = managementClient.Loft().StorageV1().Users().Update(context.Background(), user, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrap(err, "update user") - } - } - - // now ask user for new password - password := cmd.Password - if password == "" { - for { - password, err = cmd.Log.Question(&survey.QuestionOptions{ - Question: "Please enter a new password", - IsPassword: true, - }) - password = strings.TrimSpace(password) - if err != nil { - return err - } else if password == "" { - cmd.Log.Error("Please enter a password") - continue - } - - break - } - } - passwordHash := []byte(fmt.Sprintf("%x", sha256.Sum256([]byte(password)))) - - // check if secret exists - passwordSecret, err := managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Get(context.Background(), user.Spec.PasswordRef.SecretName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return err - } else if kerrors.IsNotFound(err) { - _, err = managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Create(context.Background(), &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: user.Spec.PasswordRef.SecretName, - Namespace: user.Spec.PasswordRef.SecretNamespace, - }, - Data: map[string][]byte{ - user.Spec.PasswordRef.Key: passwordHash, - }, - }, metav1.CreateOptions{}) - if err != nil { - return errors.Wrap(err, "create password secret") - } - } else { - if passwordSecret.Data == nil { - passwordSecret.Data = map[string][]byte{} - } - passwordSecret.Data[user.Spec.PasswordRef.Key] = passwordHash - _, err = managementClient.CoreV1().Secrets(user.Spec.PasswordRef.SecretNamespace).Update(context.Background(), passwordSecret, metav1.UpdateOptions{}) - if err != nil { - return errors.Wrap(err, "update password secret") - } - } - - cmd.Log.Donef("Successfully reset password of user %s", cmd.User) - return nil -} diff --git a/pkg/cli/reset/reset.go b/pkg/cli/reset/reset.go deleted file mode 100644 index bd0e980ee..000000000 --- a/pkg/cli/reset/reset.go +++ /dev/null @@ -1,21 +0,0 @@ -package reset - -import ( - "github.com/loft-sh/api/v4/pkg/product" - "github.com/loft-sh/vcluster/pkg/cli/flags" - "github.com/spf13/cobra" -) - -// NewResetCmd creates a new cobra command -func NewResetCmd(globalFlags *flags.GlobalFlags) *cobra.Command { - description := product.ReplaceWithHeader("reset", "") - c := &cobra.Command{ - Use: "reset", - Short: "Reset configuration", - Long: description, - Args: cobra.NoArgs, - } - - c.AddCommand(NewPasswordCmd(globalFlags)) - return c -} From 30dd7eb5e2404130235bc32836441d531fd6b9a9 Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 16:16:44 +0200 Subject: [PATCH 14/17] fix dep --- cmd/vclusterctl/cmd/platform/add/cluster.go | 2 +- pkg/platform/{platformclihelper => clihelper}/clihelper.go | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename pkg/platform/{platformclihelper => clihelper}/clihelper.go (100%) diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go index 94d701cf8..4eaaf0e0d 100644 --- a/cmd/vclusterctl/cmd/platform/add/cluster.go +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -19,9 +19,9 @@ import ( "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" 
"github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" "github.com/loft-sh/vcluster/pkg/platform/helper" "github.com/loft-sh/vcluster/pkg/platform/kube" - clihelper "github.com/loft-sh/vcluster/pkg/platform/platformclihelper" "github.com/loft-sh/vcluster/pkg/upgrade" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/platform/platformclihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go similarity index 100% rename from pkg/platform/platformclihelper/clihelper.go rename to pkg/platform/clihelper/clihelper.go From fb23fafdd5541b9da5f2947277c0de76bda8da5d Mon Sep 17 00:00:00 2001 From: facchettos Date: Mon, 27 May 2024 16:30:52 +0200 Subject: [PATCH 15/17] fix import --- pkg/platform/helper/helper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/platform/helper/helper.go b/pkg/platform/helper/helper.go index 1775ef5bd..189caa180 100644 --- a/pkg/platform/helper/helper.go +++ b/pkg/platform/helper/helper.go @@ -16,9 +16,9 @@ import ( "github.com/loft-sh/log" "github.com/loft-sh/log/survey" "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" "github.com/loft-sh/vcluster/pkg/platform/kube" "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" - clihelper "github.com/loft-sh/vcluster/pkg/platform/platformclihelper" "github.com/loft-sh/vcluster/pkg/projectutil" "github.com/mgutz/ansi" kerrors "k8s.io/apimachinery/pkg/api/errors" From ee644611edceb40a8edafa9c18054ddffd78e68f Mon Sep 17 00:00:00 2001 From: facchettos Date: Tue, 28 May 2024 10:53:20 +0200 Subject: [PATCH 16/17] applied suggestions --- cmd/vclusterctl/cmd/platform/add/add.go | 2 +- cmd/vclusterctl/cmd/platform/add/cluster.go | 2 +- cmd/vclusterctl/cmd/platform/get/cluster.go | 5 +---- cmd/vclusterctl/cmd/platform/list/list.go | 1 - cmd/vclusterctl/cmd/platform/pro.go | 6 +----- cmd/vclusterctl/cmd/platform/start.go | 4 ++-- 6 files changed, 6 insertions(+), 14 deletions(-) diff --git a/cmd/vclusterctl/cmd/platform/add/add.go b/cmd/vclusterctl/cmd/platform/add/add.go index dad6b9425..a625c1b5a 100644 --- a/cmd/vclusterctl/cmd/platform/add/add.go +++ b/cmd/vclusterctl/cmd/platform/add/add.go @@ -11,7 +11,7 @@ func NewAddCmd(globalFlags *flags.GlobalFlags) *cobra.Command { Use: "add", Short: "Adds a cluster to vCluster platform", Long: `####################################################### -########### vcluster platform add ################# +############# vcluster platform add ################### ####################################################### `, Args: cobra.NoArgs, diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go index 4eaaf0e0d..12f1df358 100644 --- a/cmd/vclusterctl/cmd/platform/add/cluster.go +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -56,7 +56,7 @@ func NewClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { Use: "cluster", Short: "add current cluster to vCluster platform", Long: `####################################################### -########## vcluster platform add cluster ########## +############ vcluster platform add cluster ############ ####################################################### Adds a cluster to the vCluster platform instance. 
diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go index 2f796c6de..eed178f94 100644 --- a/cmd/vclusterctl/cmd/platform/get/cluster.go +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -52,10 +52,7 @@ func (c *clusterCmd) Run(ctx context.Context, _ []string) error { return err } - kubeContext := os.Getenv("DEVSPACE_PLUGIN_KUBE_CONTEXT_FLAG") - if kubeContext == "" { - kubeContext = kubeConfig.CurrentContext - } + kubeContext := kubeConfig.CurrentContext cluster, ok := kubeConfig.Clusters[kubeContext] if !ok { diff --git a/cmd/vclusterctl/cmd/platform/list/list.go b/cmd/vclusterctl/cmd/platform/list/list.go index d23168cb2..7f6331872 100644 --- a/cmd/vclusterctl/cmd/platform/list/list.go +++ b/cmd/vclusterctl/cmd/platform/list/list.go @@ -18,7 +18,6 @@ func NewListCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command Args: cobra.NoArgs, } - // TODO: change that with the actual globalFlag variable listCmd.AddCommand(NewClustersCmd(globalFlags, cfg)) return listCmd } diff --git a/cmd/vclusterctl/cmd/platform/pro.go b/cmd/vclusterctl/cmd/platform/pro.go index 241a36d1f..a0cecc3d2 100644 --- a/cmd/vclusterctl/cmd/platform/pro.go +++ b/cmd/vclusterctl/cmd/platform/pro.go @@ -1,7 +1,6 @@ package platform import ( - "github.com/loft-sh/log" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" @@ -20,10 +19,7 @@ Deprecated, please use vcluster platform instead Args: cobra.NoArgs, } - startCmd, err := NewStartCmd(globalFlags) - if err != nil { - log.Default.Debugf(err.Error()) - } + startCmd := NewStartCmd(globalFlags) proCmd.AddCommand(startCmd) proCmd.AddCommand(NewResetCmd(globalFlags)) diff --git a/cmd/vclusterctl/cmd/platform/start.go b/cmd/vclusterctl/cmd/platform/start.go index c4174fb3f..d3bb33230 100644 --- a/cmd/vclusterctl/cmd/platform/start.go +++ b/cmd/vclusterctl/cmd/platform/start.go @@ -19,7 +19,7 @@ type StartCmd struct { start.Options } -func NewStartCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { +func NewStartCmd(globalFlags *flags.GlobalFlags) *cobra.Command { cmd := &StartCmd{ Options: start.Options{ GlobalFlags: globalFlags, @@ -71,7 +71,7 @@ before running this command: startCmd.Flags().StringVar(&cmd.ChartRepo, "chart-repo", "https://charts.loft.sh/", "The chart repo to deploy vCluster platform") startCmd.Flags().StringVar(&cmd.ChartName, "chart-name", "vcluster-platform", "The chart name to deploy vCluster platform") - return startCmd, nil + return startCmd } func (cmd *StartCmd) Run(ctx context.Context) error { From 93923a0f07a4922f67ef9857f12b3063e8ddc8d7 Mon Sep 17 00:00:00 2001 From: facchettos Date: Tue, 28 May 2024 10:59:29 +0200 Subject: [PATCH 17/17] new signature --- cmd/vclusterctl/cmd/platform/platform.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go index 6e2aed704..a9b3fd930 100644 --- a/cmd/vclusterctl/cmd/platform/platform.go +++ b/cmd/vclusterctl/cmd/platform/platform.go @@ -1,8 +1,6 @@ package platform import ( - "fmt" - "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/add" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/get" @@ -23,10 +21,7 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) (*cobra.Com Args: cobra.NoArgs, } - startCmd, err := NewStartCmd(globalFlags) - if err != 
nil { - return nil, fmt.Errorf("failed to create vcluster platform start command: %w", err) - } + startCmd := NewStartCmd(globalFlags) platformCmd.AddCommand(startCmd) platformCmd.AddCommand(NewResetCmd(globalFlags))
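Note: this final patch is the payoff of the start-command cleanup. Since
NewStartCmd now returns only *cobra.Command and can no longer fail, both call
sites (pro.go in the previous patch and platform.go here) drop their error
branch along with the log and fmt imports that existed only to report it. A
short sketch of the resulting registration, assuming a helper name of our own
choosing (addPlatformSubcommands is illustrative, not in the patch):

    // NewStartCmd returns the command directly, so wiring it into the
    // parent command is a straight line with no dead error path.
    func addPlatformSubcommands(platformCmd *cobra.Command, globalFlags *flags.GlobalFlags) {
    	startCmd := NewStartCmd(globalFlags)
    	platformCmd.AddCommand(startCmd)
    	platformCmd.AddCommand(NewResetCmd(globalFlags))
    }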