diff --git a/cmd/vclusterctl/cmd/platform/add/add.go b/cmd/vclusterctl/cmd/platform/add/add.go new file mode 100644 index 000000000..a625c1b5a --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/add/add.go @@ -0,0 +1,22 @@ +package add + +import ( + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/spf13/cobra" +) + +// NewAddCmd creates a new command +func NewAddCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + addCmd := &cobra.Command{ + Use: "add", + Short: "Adds a cluster to vCluster platform", + Long: `####################################################### +############# vcluster platform add ################### +####################################################### + `, + Args: cobra.NoArgs, + } + + addCmd.AddCommand(NewClusterCmd(globalFlags)) + return addCmd +} diff --git a/cmd/vclusterctl/cmd/platform/add/cluster.go b/cmd/vclusterctl/cmd/platform/add/cluster.go new file mode 100644 index 000000000..12f1df358 --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/add/cluster.go @@ -0,0 +1,308 @@ +package add + +import ( + "cmp" + "context" + "errors" + "fmt" + "os" + "os/exec" + "time" + + "github.com/loft-sh/log" + "github.com/sirupsen/logrus" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/vcluster/pkg/cli/config" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" + "github.com/loft-sh/vcluster/pkg/platform/helper" + "github.com/loft-sh/vcluster/pkg/platform/kube" + "github.com/loft-sh/vcluster/pkg/upgrade" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +type ClusterCmd struct { + Log log.Logger + *flags.GlobalFlags + Cfg *config.CLI + Namespace string + ServiceAccount string + DisplayName string + Context string + Insecure bool + Wait bool + HelmChartPath string + HelmChartVersion string + HelmSet []string + HelmValues []string +} + +// NewClusterCmd creates a new command +func NewClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + cmd := &ClusterCmd{ + GlobalFlags: globalFlags, + Log: log.GetInstance(), + } + + c := &cobra.Command{ + Use: "cluster", + Short: "add current cluster to vCluster platform", + Long: `####################################################### +############ vcluster platform add cluster ############ +####################################################### +Adds a cluster to the vCluster platform instance. + +Example: +vcluster platform add cluster my-cluster +######################################################## + `, + Args: cobra.ExactArgs(1), + RunE: func(cobraCmd *cobra.Command, args []string) error { + // Check for newer version + upgrade.PrintNewerVersionWarning() + + return cmd.Run(cobraCmd.Context(), args) + }, + } + + c.Flags().StringVar(&cmd.Namespace, "namespace", "loft", "The namespace to generate the service account in. 
The namespace will be created if it does not exist") + c.Flags().StringVar(&cmd.ServiceAccount, "service-account", "loft-admin", "The service account name to create") + c.Flags().StringVar(&cmd.DisplayName, "display-name", "", "The display name to show in the UI for this cluster") + c.Flags().BoolVar(&cmd.Wait, "wait", false, "If true, will wait until the cluster is initialized") + c.Flags().BoolVar(&cmd.Insecure, "insecure", false, "If true, deploys the agent in insecure mode") + c.Flags().StringVar(&cmd.HelmChartVersion, "helm-chart-version", "", "The agent chart version to deploy") + c.Flags().StringVar(&cmd.HelmChartPath, "helm-chart-path", "", "The agent chart to deploy") + c.Flags().StringArrayVar(&cmd.HelmSet, "helm-set", []string{}, "Extra helm values for the agent chart") + c.Flags().StringArrayVar(&cmd.HelmValues, "helm-values", []string{}, "Extra helm values for the agent chart") + c.Flags().StringVar(&cmd.Context, "context", "", "The kube context to use for installation") + + return c +} + +func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { + // Get clusterName from command argument + clusterName := args[0] + + platformClient, err := platform.NewClientFromConfig(ctx, cmd.Cfg) + if err != nil { + return fmt.Errorf("new client from path: %w", err) + } + + err = platform.VerifyVersion(platformClient) + if err != nil { + return fmt.Errorf("verify loft version: %w", err) + } + + managementClient, err := platformClient.Management() + if err != nil { + return fmt.Errorf("create management client: %w", err) + } + + // get user details + user, team, err := getUserOrTeam(ctx, managementClient) + if err != nil { + return fmt.Errorf("get user or team: %w", err) + } + + loftVersion, err := platformClient.Version() + if err != nil { + return fmt.Errorf("get loft version: %w", err) + } + + // TODO(ThomasK33): Eventually change this into an Apply instead of a Create call + _, err = managementClient.Loft().ManagementV1().Clusters().Create(ctx, &managementv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + }, + Spec: managementv1.ClusterSpec{ + ClusterSpec: storagev1.ClusterSpec{ + DisplayName: cmd.DisplayName, + Owner: &storagev1.UserOrTeam{ + User: user, + Team: team, + }, + NetworkPeer: true, + Access: getAccess(user, team), + }, + }, + }, metav1.CreateOptions{}) + if err != nil && !kerrors.IsAlreadyExists(err) { + return fmt.Errorf("create cluster: %w", err) + } + + accessKey, err := managementClient.Loft().ManagementV1().Clusters().GetAccessKey(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("get cluster access key: %w", err) + } + + namespace := cmd.Namespace + + helmArgs := []string{ + "upgrade", "loft", + } + + if os.Getenv("DEVELOPMENT") == "true" { + helmArgs = []string{ + "upgrade", "--install", "loft", "./chart", + "--create-namespace", + "--namespace", namespace, + "--set", "agentOnly=true", + "--set", "image=" + cmp.Or(os.Getenv("DEVELOPMENT_IMAGE"), "ghcr.io/loft-sh/enterprise:release-test"), + } + } else { + if cmd.HelmChartPath != "" { + helmArgs = append(helmArgs, cmd.HelmChartPath) + } else { + helmArgs = append(helmArgs, "loft", "--repo", "https://charts.loft.sh") + } + + if loftVersion.Version != "" { + helmArgs = append(helmArgs, "--version", loftVersion.Version) + } + + if cmd.HelmChartVersion != "" { + helmArgs = append(helmArgs, "--version", cmd.HelmChartVersion) + } + + // general arguments + helmArgs = append(helmArgs, "--install", "--create-namespace", "--namespace", cmd.Namespace, "--set", 
"agentOnly=true") + } + + for _, set := range cmd.HelmSet { + helmArgs = append(helmArgs, "--set", set) + } + for _, values := range cmd.HelmValues { + helmArgs = append(helmArgs, "--values", values) + } + + if accessKey.LoftHost != "" { + helmArgs = append(helmArgs, "--set", "url="+accessKey.LoftHost) + } + + if accessKey.AccessKey != "" { + helmArgs = append(helmArgs, "--set", "token="+accessKey.AccessKey) + } + + if cmd.Insecure || accessKey.Insecure { + helmArgs = append(helmArgs, "--set", "insecureSkipVerify=true") + } + + if accessKey.CaCert != "" { + helmArgs = append(helmArgs, "--set", "additionalCA="+accessKey.CaCert) + } + + if cmd.Wait { + helmArgs = append(helmArgs, "--wait") + } + + if cmd.Context != "" { + helmArgs = append(helmArgs, "--kube-context", cmd.Context) + } + + kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}) + + if cmd.Context != "" { + kubeConfig, err := kubeClientConfig.RawConfig() + if err != nil { + return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + } + + kubeClientConfig = clientcmd.NewNonInteractiveClientConfig(kubeConfig, cmd.Context, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules()) + } + + config, err := kubeClientConfig.ClientConfig() + if err != nil { + return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("create kube client: %w", err) + } + + errChan := make(chan error) + + go func() { + helmCmd := exec.CommandContext(ctx, "helm", helmArgs...) 
+ + helmCmd.Stdout = cmd.Log.Writer(logrus.DebugLevel, true) + helmCmd.Stderr = cmd.Log.Writer(logrus.DebugLevel, true) + helmCmd.Stdin = os.Stdin + + cmd.Log.Info("Installing Loft agent...") + cmd.Log.Debugf("Running helm command: %v", helmCmd.Args) + + err = helmCmd.Run() + if err != nil { + errChan <- fmt.Errorf("failed to install loft chart: %w", err) + } + + close(errChan) + }() + + _, err = clihelper.WaitForReadyLoftPod(ctx, clientset, namespace, cmd.Log) + if err = errors.Join(err, <-errChan); err != nil { + return fmt.Errorf("wait for loft pod: %w", err) + } + + if cmd.Wait { + cmd.Log.Info("Waiting for the cluster to be initialized...") + waitErr := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { + clusterInstance, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } + + return clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil + }) + if waitErr != nil { + return fmt.Errorf("get cluster: %w", waitErr) + } + } + + cmd.Log.Donef("Successfully added cluster %s to Loft", clusterName) + + return nil +} + +func getUserOrTeam(ctx context.Context, managementClient kube.Interface) (string, string, error) { + var user, team string + + userName, teamName, err := helper.GetCurrentUser(ctx, managementClient) + if err != nil { + return "", "", fmt.Errorf("get current user: %w", err) + } + + if userName != nil { + user = userName.Name + } else { + team = teamName.Name + } + + return user, team, nil +} + +func getAccess(user, team string) []storagev1.Access { + access := []storagev1.Access{ + { + Verbs: []string{"*"}, + Subresources: []string{"*"}, + }, + } + + if team != "" { + access[0].Teams = []string{team} + } else { + access[0].Users = []string{user} + } + + return access +} diff --git a/cmd/vclusterctl/cmd/platform/connect/cluster.go b/cmd/vclusterctl/cmd/platform/connect/cluster.go index 6749b2a8c..81ad8f5c6 100644 --- a/cmd/vclusterctl/cmd/platform/connect/cluster.go +++ b/cmd/vclusterctl/cmd/platform/connect/cluster.go @@ -1,280 +1,165 @@ package connect import ( - "cmp" "context" - "errors" + "encoding/base64" "fmt" "os" - "os/exec" - "time" - - "github.com/loft-sh/loftctl/v4/pkg/client/helper" - "github.com/loft-sh/loftctl/v4/pkg/clihelper" - "github.com/loft-sh/loftctl/v4/pkg/kube" - "github.com/loft-sh/log" - "github.com/sirupsen/logrus" - kerrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" - storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/loftctl/v4/pkg/upgrade" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/log" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" + "github.com/loft-sh/vcluster/pkg/upgrade" + "github.com/mgutz/ansi" "github.com/spf13/cobra" + kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) +const ( + // LoftDirectClusterEndpoint is a cluster annotation that tells the loft cli to use this endpoint instead of + // the default loft server address to connect to this cluster. 
+ LoftDirectClusterEndpoint = "loft.sh/direct-cluster-endpoint" + + // LoftDirectClusterEndpointInsecure is a cluster annotation that tells the loft cli to allow untrusted certificates + LoftDirectClusterEndpointInsecure = "loft.sh/direct-cluster-endpoint-insecure" + + // LoftDirectClusterEndpointCaData is a cluster annotation that tells the loft cli which cluster ca data to use + LoftDirectClusterEndpointCaData = "loft.sh/direct-cluster-endpoint-ca-data" +) + +// ClusterCmd holds the cmd flags type ClusterCmd struct { *flags.GlobalFlags - Log log.Logger - Namespace string - ServiceAccount string - DisplayName string - Context string - Insecure bool - Wait bool + + Print bool + DisableDirectClusterEndpoint bool + + log log.Logger } // NewClusterCmd creates a new command func NewClusterCmd(globalFlags *flags.GlobalFlags) *cobra.Command { cmd := &ClusterCmd{ GlobalFlags: globalFlags, - Log: log.GetInstance(), + log: log.GetInstance(), } - c := &cobra.Command{ - Use: "cluster", - Short: "connect current cluster to vCluster platform", - Long: `####################################################### -########## vcluster platform connect cluster ########## -####################################################### -Connect a cluster to the vCluster platform instance. + description := product.ReplaceWithHeader("use cluster", ` +Creates a new kube context for the given cluster, if +it does not yet exist. Example: -vcluster platform connect cluster my-cluster +vcluster platform connect cluster mycluster ######################################################## - `, - Args: cobra.ExactArgs(1), + `) + c := &cobra.Command{ + Use: "cluster", + Short: "Creates a kube context for the given cluster", + Long: description, + Args: cobra.MaximumNArgs(1), RunE: func(cobraCmd *cobra.Command, args []string) error { // Check for newer version - upgrade.PrintNewerVersionWarning() + if !cmd.Print { + upgrade.PrintNewerVersionWarning() + } return cmd.Run(cobraCmd.Context(), args) }, } - c.Flags().StringVar(&cmd.Namespace, "namespace", "loft", "The namespace to generate the service account in. 
The namespace will be created if it does not exist") - c.Flags().StringVar(&cmd.ServiceAccount, "service-account", "loft-admin", "The service account name to create") - c.Flags().StringVar(&cmd.DisplayName, "display-name", "", "The display name to show in the UI for this cluster") - c.Flags().BoolVar(&cmd.Wait, "wait", false, "If true, will wait until the cluster is initialized") - c.Flags().BoolVar(&cmd.Insecure, "insecure", false, "If true, deploys the agent in insecure mode") - c.Flags().StringVar(&cmd.Context, "context", "", "The kube context to use for installation") - + c.Flags().BoolVar(&cmd.Print, "print", false, "When enabled prints the context to stdout") return c } +// Run executes the command func (cmd *ClusterCmd) Run(ctx context.Context, args []string) error { - // Get clusterName from command argument - clusterName := args[0] - - platformClient, err := platform.NewClientFromConfig(ctx, cmd.LoadedConfig(cmd.Log)) + platformClient, err := platform.NewClientFromConfig(ctx, cmd.LoadedConfig(cmd.log)) if err != nil { - return fmt.Errorf("new client from path: %w", err) - } - - err = platform.VerifyVersion(platformClient) - if err != nil { - return fmt.Errorf("verify loft version: %w", err) + return err } managementClient, err := platformClient.Management() if err != nil { - return fmt.Errorf("create management client: %w", err) - } - - // get user details - user, team, err := getUserOrTeam(ctx, managementClient) - if err != nil { - return fmt.Errorf("get user or team: %w", err) - } - - platformVersion, err := platformClient.Version() - if err != nil { - return fmt.Errorf("get loft version: %w", err) + return err } - // TODO(ThomasK33): Eventually change this into an Apply instead of a Create call - _, err = managementClient.Loft().ManagementV1().Clusters().Create(ctx, &managementv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName, - }, - Spec: managementv1.ClusterSpec{ - ClusterSpec: storagev1.ClusterSpec{ - DisplayName: cmd.DisplayName, - Owner: &storagev1.UserOrTeam{ - User: user, - Team: team, - }, - NetworkPeer: true, - Access: getAccess(user, team), - }, - }, - }, metav1.CreateOptions{}) - if err != nil && !kerrors.IsAlreadyExists(err) { - return fmt.Errorf("create cluster: %w", err) + // determine cluster name + clusterName := "" + if len(args) == 0 { + clusterName, err = platformClient.SelectCluster(ctx, cmd.log) + if err != nil { + return err + } + } else { + clusterName = args[0] } - accessKey, err := managementClient.Loft().ManagementV1().Clusters().GetAccessKey(ctx, clusterName, metav1.GetOptions{}) + // check if the cluster exists + cluster, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("get cluster access key: %w", err) - } - - namespace := cmd.Namespace - - helmArgs := []string{ - "upgrade", "--install", "loft", "loft", - "--repo", "https://charts.loft.sh", - "--create-namespace", - "--namespace", namespace, - "--set", "agentOnly=true", - } - - if os.Getenv("DEVELOPMENT") == "true" { - helmArgs = []string{ - "upgrade", "--install", "loft", "./chart", - "--create-namespace", - "--namespace", namespace, - "--set", "agentOnly=true", - "--set", "image=" + cmp.Or(os.Getenv("DEVELOPMENT_IMAGE"), "ghcr.io/loft-sh/enterprise:release-test"), + if kerrors.IsForbidden(err) { + return fmt.Errorf("cluster '%s' does not exist, or you don't have permission to use it", clusterName) } - } else if platformVersion.Version != "" { - helmArgs = append(helmArgs, "--version", 
platformVersion.Version) - } - if accessKey.LoftHost != "" { - helmArgs = append(helmArgs, "--set", "url="+accessKey.LoftHost) + return err } - if accessKey.AccessKey != "" { - helmArgs = append(helmArgs, "--set", "token="+accessKey.AccessKey) - } - - if cmd.Insecure || accessKey.Insecure || platformClient.Config().Platform.Insecure { - helmArgs = append(helmArgs, "--set", "insecureSkipVerify=true") - } - - if accessKey.CaCert != "" { - helmArgs = append(helmArgs, "--set", "additionalCA="+accessKey.CaCert) - } - - if cmd.Wait { - helmArgs = append(helmArgs, "--wait") - } - - if cmd.Context != "" { - helmArgs = append(helmArgs, "--kube-context", cmd.Context) + // create kube context options + contextOptions, err := CreateClusterContextOptions(platformClient, cmd.Config, cluster, "", true) + if err != nil { + return err } - kubeClientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}) - - if cmd.Context != "" { - kubeConfig, err := kubeClientConfig.RawConfig() + // check if we should print or update the config + if cmd.Print { + err = kubeconfig.PrintKubeConfigTo(contextOptions, os.Stdout) if err != nil { - return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) + return err } - - kubeClientConfig = clientcmd.NewNonInteractiveClientConfig(kubeConfig, cmd.Context, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules()) - } - - config, err := kubeClientConfig.ClientConfig() - if err != nil { - return fmt.Errorf("there is an error loading your current kube config (%w), please make sure you have access to a kubernetes cluster and the command `kubectl get namespaces` is working", err) - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("create kube client: %w", err) - } - - errChan := make(chan error) - - go func() { - helmCmd := exec.CommandContext(ctx, "helm", helmArgs...) 
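Both the removed code here and its replacement in `add/cluster.go` resolve a client-go config that honors an optional `--context` flag. A compact sketch of that resolution using the documented `clientcmd` API; note that the `CurrentContext` override shown here is a shorter equivalent of the `RawConfig` + `NewNonInteractiveClientConfig` sequence in the diff, and the empty context passed in `main` simply means "use the current context":

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// clientForContext resolves a clientset for the named kube context, or for
// the current context when kubeContext is empty.
func clientForContext(kubeContext string) (*kubernetes.Clientset, error) {
	loader := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{}
	if kubeContext != "" {
		// Pins the loaded config to one context, with the same outcome as
		// the RawConfig/NewNonInteractiveClientConfig dance in the diff.
		overrides.CurrentContext = kubeContext
	}

	restConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, overrides).ClientConfig()
	if err != nil {
		return nil, fmt.Errorf("load kube config: %w", err)
	}

	return kubernetes.NewForConfig(restConfig)
}

func main() {
	clientset, err := clientForContext("")
	if err != nil {
		panic(err)
	}
	fmt.Println("resolved clientset:", clientset != nil)
}
```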
- - helmCmd.Stdout = cmd.Log.Writer(logrus.DebugLevel, true) - helmCmd.Stderr = cmd.Log.Writer(logrus.DebugLevel, true) - helmCmd.Stdin = os.Stdin - - cmd.Log.Info("Installing Loft agent...") - cmd.Log.Debugf("Running helm command: %v", helmCmd.Args) - - err = helmCmd.Run() + } else { + // update kube config + err = kubeconfig.UpdateKubeConfig(contextOptions) if err != nil { - errChan <- fmt.Errorf("failed to install loft chart: %w", err) + return err } - close(errChan) - }() - - _, err = clihelper.WaitForReadyLoftPod(ctx, clientset, namespace, cmd.Log) - if err = errors.Join(err, <-errChan); err != nil { - return fmt.Errorf("wait for loft pod: %w", err) - } - - if cmd.Wait { - cmd.Log.Info("Waiting for the cluster to be initialized...") - waitErr := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (done bool, err error) { - clusterInstance, err := managementClient.Loft().ManagementV1().Clusters().Get(ctx, clusterName, metav1.GetOptions{}) - if err != nil && !kerrors.IsNotFound(err) { - return false, err - } - - return clusterInstance.Status.Phase == storagev1.ClusterStatusPhaseInitialized, nil - }) - if waitErr != nil { - return fmt.Errorf("get cluster: %w", waitErr) - } + cmd.log.Donef("Successfully updated kube context to use cluster %s", ansi.Color(clusterName, "white+b")) } - cmd.Log.Donef("Successfully connected cluster %s to Loft", clusterName) - return nil } -func getUserOrTeam(ctx context.Context, managementClient kube.Interface) (string, string, error) { - var user, team string - - userName, teamName, err := helper.GetCurrentUser(ctx, managementClient) - if err != nil { - return "", "", fmt.Errorf("get current user: %w", err) +func CreateClusterContextOptions(platformClient platform.Client, config string, cluster *managementv1.Cluster, spaceName string, setActive bool) (kubeconfig.ContextOptions, error) { + contextOptions := kubeconfig.ContextOptions{ + Name: kubeconfig.SpaceContextName(cluster.Name, spaceName), + ConfigPath: config, + CurrentNamespace: spaceName, + SetActive: setActive, } + contextOptions.Server = platformClient.Config().Platform.Host + "/kubernetes/cluster/" + cluster.Name + contextOptions.InsecureSkipTLSVerify = platformClient.Config().Platform.Insecure - if userName != nil { - user = userName.Name - } else { - team = teamName.Name + data, err := retrieveCaData(cluster) + if err != nil { + return kubeconfig.ContextOptions{}, err } - - return user, team, nil + contextOptions.CaData = data + return contextOptions, nil } -func getAccess(user, team string) []storagev1.Access { - access := []storagev1.Access{ - { - Verbs: []string{"*"}, - Subresources: []string{"*"}, - }, +func retrieveCaData(cluster *managementv1.Cluster) ([]byte, error) { + if cluster == nil || cluster.Annotations == nil || cluster.Annotations[LoftDirectClusterEndpointCaData] == "" { + return nil, nil } - if team != "" { - access[0].Teams = []string{team} - } else { - access[0].Users = []string{user} + data, err := base64.StdEncoding.DecodeString(cluster.Annotations[LoftDirectClusterEndpointCaData]) + if err != nil { + return nil, fmt.Errorf("error decoding cluster %s annotation: %w", LoftDirectClusterEndpointCaData, err) } - return access + return data, nil } diff --git a/cmd/vclusterctl/cmd/platform/connect/connect.go b/cmd/vclusterctl/cmd/platform/connect/connect.go index fb69afaf2..b201f8dcf 100644 --- a/cmd/vclusterctl/cmd/platform/connect/connect.go +++ b/cmd/vclusterctl/cmd/platform/connect/connect.go @@ -1,20 +1,22 @@ package connect import ( 
+ "github.com/loft-sh/api/v4/pkg/product" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" ) -// NewConnectCmd creates a new command +// NewConnectCmd creates a new cobra command func NewConnectCmd(globalFlags *flags.GlobalFlags) *cobra.Command { + description := product.ReplaceWithHeader("use", ` + +Activates a kube context for the given cluster / space / vcluster / management. + `) connectCmd := &cobra.Command{ Use: "connect", - Short: "Connects a cluster to vCluster platform", - Long: `####################################################### -########### vcluster platform connect ################# -####################################################### - `, - Args: cobra.NoArgs, + Short: product.Replace("Uses loft resources"), + Long: description, + Args: cobra.NoArgs, } connectCmd.AddCommand(NewClusterCmd(globalFlags)) diff --git a/cmd/vclusterctl/cmd/platform/get/cluster.go b/cmd/vclusterctl/cmd/platform/get/cluster.go new file mode 100644 index 000000000..eed178f94 --- /dev/null +++ b/cmd/vclusterctl/cmd/platform/get/cluster.go @@ -0,0 +1,182 @@ +package get + +import ( + "context" + "errors" + "os" + "strings" + "time" + + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + "github.com/loft-sh/loftctl/v4/pkg/config" + cliconfig "github.com/loft-sh/vcluster/pkg/cli/config" + "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/projectutil" + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + ErrNotLoftContext = errors.New("current context is not a loft context, but predefined var LOFT_CLUSTER is used") +) + +type clusterCmd struct { + *flags.GlobalFlags + cfg *cliconfig.CLI +} + +func newClusterCmd(globalFlags *flags.GlobalFlags, cfg *cliconfig.CLI) *cobra.Command { + cmd := &clusterCmd{ + GlobalFlags: globalFlags, + cfg: cfg, + } + + return &cobra.Command{ + Use: "cluster", + Short: "Prints the current cluster", + Args: cobra.NoArgs, + RunE: func(cobraCmd *cobra.Command, args []string) error { + return cmd.Run(cobraCmd.Context(), args) + }, + } +} + +// Run executes the command logic +func (c *clusterCmd) Run(ctx context.Context, _ []string) error { + kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + kubeContext := kubeConfig.CurrentContext + + cluster, ok := kubeConfig.Clusters[kubeContext] + if !ok { + return ErrNotLoftContext + } + + isProject, projectName := isProjectContext(cluster) + if isProject { + platformClient, err := platform.NewClientFromConfig(ctx, c.cfg) + if err != nil { + return err + } + + managementClient, err := platformClient.Management() + if err != nil { + return err + } + + if isSpace, spaceName := isSpaceContext(cluster); isSpace { + var spaceInstance *managementv1.SpaceInstance + err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) { + var err error + + spaceInstance, err = managementClient.Loft().ManagementV1().SpaceInstances(projectutil.ProjectNamespace(projectName)).Get(ctx, spaceName, metav1.GetOptions{}) + if err != nil { + return false, err + } + + // Wait for space instance to be scheduled + if spaceInstance.Spec.ClusterRef.Cluster == "" { + return false, nil + } + + return true, nil 
+			})
+			if err != nil {
+				return err
+			}
+
+			_, err = os.Stdout.Write([]byte(spaceInstance.Spec.ClusterRef.Cluster))
+			return err
+		}
+
+		if isVirtualCluster, virtualClusterName := isVirtualClusterContext(cluster); isVirtualCluster {
+			var virtualClusterInstance *managementv1.VirtualClusterInstance
+			err := wait.PollUntilContextTimeout(ctx, time.Second, config.Timeout(), true, func(ctx context.Context) (bool, error) {
+				var err error
+
+				virtualClusterInstance, err = managementClient.Loft().ManagementV1().VirtualClusterInstances(projectutil.ProjectNamespace(projectName)).Get(ctx, virtualClusterName, metav1.GetOptions{})
+				if err != nil {
+					return false, err
+				}
+
+				// Wait for the virtual cluster instance to be scheduled
+				if virtualClusterInstance.Spec.ClusterRef.Cluster == "" {
+					return false, nil
+				}
+
+				return true, nil
+			})
+			if err != nil {
+				return err
+			}
+
+			_, err = os.Stdout.Write([]byte(virtualClusterInstance.Spec.ClusterRef.Cluster))
+			return err
+		}
+
+		return ErrNotLoftContext
+	}
+
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+	if len(splitted) < 3 {
+		return ErrNotLoftContext
+	} else if splitted[len(splitted)-2] != "cluster" || splitted[len(splitted)-3] != "kubernetes" {
+		return ErrNotLoftContext
+	}
+
+	_, err = os.Stdout.Write([]byte(splitted[len(splitted)-1]))
+	return err
+}
+
+func isProjectContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[4] == "project" {
+		return true, splitted[5]
+	}
+
+	return false, ""
+}
+
+func isSpaceContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[6] == "space" {
+		return true, splitted[7]
+	}
+
+	return false, ""
+}
+
+func isVirtualClusterContext(cluster *api.Cluster) (bool, string) {
+	server := strings.TrimSuffix(cluster.Server, "/")
+	splitted := strings.Split(server, "/")
+
+	if len(splitted) < 8 {
+		return false, ""
+	}
+
+	if splitted[6] == "virtualcluster" {
+		return true, splitted[7]
+	}
+
+	return false, ""
+}
diff --git a/cmd/vclusterctl/cmd/platform/get/get.go b/cmd/vclusterctl/cmd/platform/get/get.go
new file mode 100644
index 000000000..8147cd995
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/get/get.go
@@ -0,0 +1,23 @@
+package get
+
+import (
+	"github.com/loft-sh/api/v4/pkg/product"
+	"github.com/loft-sh/vcluster/pkg/cli/config"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/spf13/cobra"
+)
+
+// NewGetCmd creates a new cobra command for the get sub command
+func NewGetCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command {
+	description := product.ReplaceWithHeader("var", "")
+
+	cmd := &cobra.Command{
+		Use:   "get",
+		Short: "Retrieves and displays information",
+		Long:  description,
+		Args:  cobra.NoArgs,
+	}
+
+	cmd.AddCommand(newClusterCmd(globalFlags, cfg))
+	return cmd
+}
diff --git a/cmd/vclusterctl/cmd/platform/import.go b/cmd/vclusterctl/cmd/platform/import.go
index b56a919ce..a00ba494f 100644
--- a/cmd/vclusterctl/cmd/platform/import.go
+++ b/cmd/vclusterctl/cmd/platform/import.go
@@ -3,11 +3,12 @@ package platform
 import (
 	"context"
 
-	loftctlUtil "github.com/loft-sh/loftctl/v4/pkg/util"
 	"github.com/loft-sh/log"
 	"github.com/loft-sh/vcluster/pkg/cli"
 	"github.com/loft-sh/vcluster/pkg/cli/config"
 	"github.com/loft-sh/vcluster/pkg/cli/flags"
+
+	loftctlUtil "github.com/loft-sh/vcluster/pkg/platform/loftutils"
 	"github.com/spf13/cobra"
 )
diff --git a/cmd/vclusterctl/cmd/platform/list/clusters.go b/cmd/vclusterctl/cmd/platform/list/clusters.go
new file mode 100644
index 000000000..5b6d0c2bc
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/list/clusters.go
@@ -0,0 +1,84 @@
+package list
+
+import (
+	"context"
+	"time"
+
+	"github.com/loft-sh/api/v4/pkg/product"
+	"github.com/loft-sh/log"
+	"github.com/loft-sh/log/table"
+	"github.com/loft-sh/vcluster/pkg/cli/config"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/loft-sh/vcluster/pkg/platform"
+	"github.com/spf13/cobra"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/duration"
+)
+
+// ClustersCmd holds the clusters cmd flags
+type ClustersCmd struct {
+	*flags.GlobalFlags
+
+	log log.Logger
+	cfg *config.CLI
+}
+
+// NewClustersCmd creates a new clusters command
+func NewClustersCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command {
+	cmd := &ClustersCmd{
+		GlobalFlags: globalFlags,
+		log:         log.GetInstance(),
+		cfg:         cfg,
+	}
+	description := product.ReplaceWithHeader("list clusters", `
+List the vCluster platform clusters you have access to
+
+Example:
+vcluster platform list clusters
+########################################################
+	`)
+	clustersCmd := &cobra.Command{
+		Use:   "clusters",
+		Short: product.Replace("Lists the loft clusters you have access to"),
+		Long:  description,
+		Args:  cobra.NoArgs,
+		RunE: func(cobraCmd *cobra.Command, _ []string) error {
+			return cmd.RunClusters(cobraCmd.Context())
+		},
+	}
+
+	return clustersCmd
+}
+
+// RunClusters executes the functionality
+func (cmd *ClustersCmd) RunClusters(ctx context.Context) error {
+	platformClient, err := platform.NewClientFromConfig(ctx, cmd.cfg)
+	if err != nil {
+		return err
+	}
+
+	managementClient, err := platformClient.Management()
+	if err != nil {
+		return err
+	}
+
+	clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	header := []string{
+		"Cluster",
+		"Age",
+	}
+	values := [][]string{}
+	for _, cluster := range clusterList.Items {
+		values = append(values, []string{
+			cluster.Name,
+			duration.HumanDuration(time.Since(cluster.CreationTimestamp.Time)),
+		})
+	}
+
+	table.PrintTable(cmd.log, header, values)
+	return nil
+}
diff --git a/cmd/vclusterctl/cmd/platform/list/list.go b/cmd/vclusterctl/cmd/platform/list/list.go
new file mode 100644
index 000000000..7f6331872
--- /dev/null
+++ b/cmd/vclusterctl/cmd/platform/list/list.go
@@ -0,0 +1,23 @@
+package list
+
+import (
+	"github.com/loft-sh/api/v4/pkg/product"
+
+	"github.com/loft-sh/vcluster/pkg/cli/config"
+	"github.com/loft-sh/vcluster/pkg/cli/flags"
+	"github.com/spf13/cobra"
+)
+
+// NewListCmd creates a new cobra command
+func NewListCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) *cobra.Command {
+	description := product.ReplaceWithHeader("list", "")
+	listCmd := &cobra.Command{
+		Use:   "list",
+		Short: "Lists vCluster platform resources",
+		Long:  description,
+		Args:  cobra.NoArgs,
+	}
+
+	listCmd.AddCommand(NewClustersCmd(globalFlags, cfg))
+	return listCmd
+}
diff --git a/cmd/vclusterctl/cmd/platform/platform.go b/cmd/vclusterctl/cmd/platform/platform.go
index 0bf527947..a9b3fd930 100644
--- a/cmd/vclusterctl/cmd/platform/platform.go
+++ b/cmd/vclusterctl/cmd/platform/platform.go
@@ -1,14 +1,16 @@
 package platform
 
 import (
-	"fmt"
-
+	"github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/add"
"github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/get" + "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/list" + "github.com/loft-sh/vcluster/pkg/cli/config" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" ) -func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { +func NewPlatformCmd(globalFlags *flags.GlobalFlags, cfg *config.CLI) (*cobra.Command, error) { platformCmd := &cobra.Command{ Use: "platform", Short: "vCluster platform subcommands", @@ -19,16 +21,16 @@ func NewPlatformCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { Args: cobra.NoArgs, } - startCmd, err := NewStartCmd(globalFlags) - if err != nil { - return nil, fmt.Errorf("failed to create vcluster platform start command: %w", err) - } + startCmd := NewStartCmd(globalFlags) platformCmd.AddCommand(startCmd) platformCmd.AddCommand(NewResetCmd(globalFlags)) - platformCmd.AddCommand(connect.NewConnectCmd(globalFlags)) + platformCmd.AddCommand(add.NewAddCmd(globalFlags)) platformCmd.AddCommand(NewAccessKeyCmd(globalFlags)) platformCmd.AddCommand(NewImportCmd(globalFlags)) + platformCmd.AddCommand(get.NewGetCmd(globalFlags, cfg)) + platformCmd.AddCommand(connect.NewConnectCmd(globalFlags)) + platformCmd.AddCommand(list.NewListCmd(globalFlags, cfg)) return platformCmd, nil } diff --git a/cmd/vclusterctl/cmd/platform/pro.go b/cmd/vclusterctl/cmd/platform/pro.go index 1fa355d6f..a0cecc3d2 100644 --- a/cmd/vclusterctl/cmd/platform/pro.go +++ b/cmd/vclusterctl/cmd/platform/pro.go @@ -1,8 +1,6 @@ package platform import ( - "fmt" - "github.com/loft-sh/vcluster/cmd/vclusterctl/cmd/platform/connect" "github.com/loft-sh/vcluster/pkg/cli/flags" "github.com/spf13/cobra" @@ -21,10 +19,7 @@ Deprecated, please use vcluster platform instead Args: cobra.NoArgs, } - startCmd, err := NewStartCmd(globalFlags) - if err != nil { - return nil, fmt.Errorf("failed to create vcluster pro start command: %w", err) - } + startCmd := NewStartCmd(globalFlags) proCmd.AddCommand(startCmd) proCmd.AddCommand(NewResetCmd(globalFlags)) diff --git a/cmd/vclusterctl/cmd/platform/reset.go b/cmd/vclusterctl/cmd/platform/reset.go index cd6d9d472..e215543fd 100644 --- a/cmd/vclusterctl/cmd/platform/reset.go +++ b/cmd/vclusterctl/cmd/platform/reset.go @@ -7,11 +7,11 @@ import ( "strings" storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" - "github.com/loft-sh/loftctl/v4/pkg/kube" "github.com/loft-sh/loftctl/v4/pkg/random" "github.com/loft-sh/log" "github.com/loft-sh/log/survey" "github.com/loft-sh/vcluster/pkg/cli/flags" + "github.com/loft-sh/vcluster/pkg/platform/kube" "github.com/pkg/errors" "github.com/spf13/cobra" corev1 "k8s.io/api/core/v1" diff --git a/cmd/vclusterctl/cmd/platform/start.go b/cmd/vclusterctl/cmd/platform/start.go index c4174fb3f..d3bb33230 100644 --- a/cmd/vclusterctl/cmd/platform/start.go +++ b/cmd/vclusterctl/cmd/platform/start.go @@ -19,7 +19,7 @@ type StartCmd struct { start.Options } -func NewStartCmd(globalFlags *flags.GlobalFlags) (*cobra.Command, error) { +func NewStartCmd(globalFlags *flags.GlobalFlags) *cobra.Command { cmd := &StartCmd{ Options: start.Options{ GlobalFlags: globalFlags, @@ -71,7 +71,7 @@ before running this command: startCmd.Flags().StringVar(&cmd.ChartRepo, "chart-repo", "https://charts.loft.sh/", "The chart repo to deploy vCluster platform") startCmd.Flags().StringVar(&cmd.ChartName, "chart-name", "vcluster-platform", "The chart name to deploy vCluster platform") - return 
startCmd, nil + return startCmd } func (cmd *StartCmd) Run(ctx context.Context) error { diff --git a/cmd/vclusterctl/cmd/root.go b/cmd/vclusterctl/cmd/root.go index dd6196354..b12e5c370 100644 --- a/cmd/vclusterctl/cmd/root.go +++ b/cmd/vclusterctl/cmd/root.go @@ -86,6 +86,7 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { rootCmd := NewRootCmd(log) persistentFlags := rootCmd.PersistentFlags() globalFlags = flags.SetGlobalFlags(persistentFlags, log) + cfg := globalFlags.LoadedConfig(log) // Set version for --version flag rootCmd.Version = upgrade.GetVersion() @@ -112,7 +113,7 @@ func BuildRoot(log log.Logger) (*cobra.Command, error) { return nil, fmt.Errorf("failed to create pro command: %w", err) } rootCmd.AddCommand(proCmd) - platformCmd, err := cmdpro.NewPlatformCmd(globalFlags) + platformCmd, err := cmdpro.NewPlatformCmd(globalFlags, cfg) if err != nil { return nil, fmt.Errorf("failed to create platform command: %w", err) } diff --git a/pkg/platform/client.go b/pkg/platform/client.go index b9462133e..cc8a4747d 100644 --- a/pkg/platform/client.go +++ b/pkg/platform/client.go @@ -84,6 +84,7 @@ type Client interface { ResolveTemplate(ctx context.Context, project, template, templateVersion string, setParams []string, fileParams string, log log.Logger) (*managementv1.VirtualClusterTemplate, string, error) SelectProjectOrCluster(ctx context.Context, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) + SelectCluster(ctx context.Context, log log.Logger) (string, error) ApplyPlatformSecret(ctx context.Context, kubeClient kubernetes.Interface, importName, namespace, project string) error diff --git a/pkg/platform/clihelper/clihelper.go b/pkg/platform/clihelper/clihelper.go new file mode 100644 index 000000000..ec8eea52c --- /dev/null +++ b/pkg/platform/clihelper/clihelper.go @@ -0,0 +1,773 @@ +package clihelper + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "sort" + "strconv" + "strings" + "time" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + storagev1 "github.com/loft-sh/api/v4/pkg/apis/storage/v1" + "github.com/loft-sh/api/v4/pkg/product" + "github.com/loft-sh/loftctl/v4/pkg/httputil" + "github.com/sirupsen/logrus" + + jsonpatch "github.com/evanphx/json-patch" + loftclientset "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + "github.com/loft-sh/loftctl/v4/pkg/config" + "github.com/loft-sh/loftctl/v4/pkg/portforward" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/transport/spdy" + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" +) + +// CriticalStatus container status +var CriticalStatus = map[string]bool{ + "Error": true, + "Unknown": true, + "ImagePullBackOff": true, + "CrashLoopBackOff": true, + "RunContainerError": true, + "ErrImagePull": true, + "CreateContainerConfigError": true, + "InvalidImageName": true, +} + +const defaultReleaseName = "loft" + +const LoftRouterDomainSecret = "loft-router-domain" + +var defaultDeploymentName = "loft" + +func GetDisplayName(name string, displayName string) string { + if displayName != "" { + return displayName + } 
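Stepping back to the command wiring changed above: `NewPlatformCmd` and `BuildRoot` follow the usual cobra pattern of a parent command that carries no behavior of its own and only groups subcommands. A self-contained sketch of that pattern; the `demo` and `hello` names are invented for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func newHelloCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "hello",
		Short: "Prints a greeting",
		Args:  cobra.NoArgs,
		RunE: func(cmd *cobra.Command, _ []string) error {
			fmt.Fprintln(cmd.OutOrStdout(), "hello")
			return nil
		},
	}
}

func main() {
	// The parent has no RunE of its own; like "vcluster platform" it exists
	// only to group subcommands.
	root := &cobra.Command{
		Use:   "demo",
		Short: "Groups subcommands",
		Args:  cobra.NoArgs,
	}
	root.AddCommand(newHelloCmd())

	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}
```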
+ + return name +} + +func GetTableDisplayName(name string, displayName string) string { + if displayName != "" && displayName != name { + return displayName + " (" + name + ")" + } + + return name +} + +func DisplayName(entityInfo *clusterv1.EntityInfo) string { + if entityInfo == nil { + return "" + } else if entityInfo.DisplayName != "" { + return entityInfo.DisplayName + } else if entityInfo.Username != "" { + return entityInfo.Username + } + + return entityInfo.Name +} + +func GetLoftIngressHost(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) { + ingress, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + ingress, err := kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil { + return "", err + } + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil + } + } else { + // find host + for _, rule := range ingress.Spec.Rules { + return rule.Host, nil + } + } + + return "", fmt.Errorf("couldn't find any host in loft ingress '%s/loft-ingress', please make sure you have not changed any deployed resources", namespace) +} + +func WaitForReadyLoftPod(ctx context.Context, kubeClient kubernetes.Interface, namespace string, log log.Logger) (*corev1.Pod, error) { + // wait until we have a running loft pod + now := time.Now() + pod := &corev1.Pod{} + err := wait.PollUntilContextTimeout(ctx, time.Second*2, config.Timeout(), true, func(ctx context.Context) (bool, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: "app=loft", + }) + if err != nil { + log.Warnf("Error trying to retrieve %s pod: %v", product.DisplayName(), err) + return false, nil + } else if len(pods.Items) == 0 { + if time.Now().After(now.Add(time.Second * 10)) { + log.Infof("Still waiting for a %s pod...", product.DisplayName()) + now = time.Now() + } + return false, nil + } + + sort.Slice(pods.Items, func(i, j int) bool { + return pods.Items[i].CreationTimestamp.After(pods.Items[j].CreationTimestamp.Time) + }) + + loftPod := &pods.Items[0] + found := false + for _, containerStatus := range loftPod.Status.ContainerStatuses { + if containerStatus.State.Running != nil && containerStatus.Ready { + if containerStatus.Name == "manager" { + found = true + } + + continue + } else if containerStatus.State.Terminated != nil || (containerStatus.State.Waiting != nil && CriticalStatus[containerStatus.State.Waiting.Reason]) { + reason := "" + message := "" + if containerStatus.State.Terminated != nil { + reason = containerStatus.State.Terminated.Reason + message = containerStatus.State.Terminated.Message + } else if containerStatus.State.Waiting != nil { + reason = containerStatus.State.Waiting.Reason + message = containerStatus.State.Waiting.Message + } + + out, err := kubeClient.CoreV1().Pods(namespace).GetLogs(loftPod.Name, &corev1.PodLogOptions{ + Container: "manager", + }).Do(context.Background()).Raw() + if err != nil { + return false, fmt.Errorf("there seems to be an issue with %s starting up: %s (%s). Please reach out to our support at https://loft.sh/", product.DisplayName(), message, reason) + } + if strings.Contains(string(out), "register instance: Post \"https://license.loft.sh/register\": dial tcp") { + return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up. 
It looks like you are trying to install %[1]s into an air-gapped environment; please reach out to our support at https://loft.sh/ for an offline license", product.DisplayName(), string(out))
+				}
+
+				return false, fmt.Errorf("%[1]s logs: \n%[2]v \nThere seems to be an issue with %[1]s starting up: %[3]s (%[4]s). Please reach out to our support at https://loft.sh/", product.DisplayName(), string(out), message, reason)
+			} else if containerStatus.State.Waiting != nil && time.Now().After(now.Add(time.Second*10)) {
+				if containerStatus.State.Waiting.Message != "" {
+					log.Infof("Please keep waiting, %s container is still starting up: %s (%s)", product.DisplayName(), containerStatus.State.Waiting.Message, containerStatus.State.Waiting.Reason)
+				} else if containerStatus.State.Waiting.Reason != "" {
+					log.Infof("Please keep waiting, %s container is still starting up: %s", product.DisplayName(), containerStatus.State.Waiting.Reason)
+				} else {
+					log.Infof("Please keep waiting, %s container is still starting up...", product.DisplayName())
+				}
+
+				now = time.Now()
+			}
+
+			return false, nil
+		}
+
+		pod = loftPod
+		return found, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return pod, nil
+}
+
+func StartPortForwarding(ctx context.Context, config *rest.Config, client kubernetes.Interface, pod *corev1.Pod, localPort string, log log.Logger) (chan struct{}, error) {
+	log.WriteString(logrus.InfoLevel, "\n")
+	log.Infof("Starting port-forwarding to the %s pod", product.DisplayName())
+	execRequest := client.CoreV1().RESTClient().Post().
+		Resource("pods").
+		Name(pod.Name).
+		Namespace(pod.Namespace).
+		SubResource("portforward")
+
+	t, upgrader, err := spdy.RoundTripperFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: t}, "POST", execRequest.URL())
+	errChan := make(chan error)
+	readyChan := make(chan struct{})
+	stopChan := make(chan struct{})
+	targetPort := getPortForwardingTargetPort(pod)
+	forwarder, err := portforward.New(dialer, []string{localPort + ":" + strconv.Itoa(targetPort)}, stopChan, readyChan, errChan, io.Discard, io.Discard)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		err := forwarder.ForwardPorts(ctx)
+		if err != nil {
+			errChan <- err
+		}
+	}()
+
+	// wait till ready
+	select {
+	case err = <-errChan:
+		return nil, err
+	case <-readyChan:
+	case <-stopChan:
+		return nil, fmt.Errorf("stopped before ready")
+	}
+
+	// start watcher
+	go func() {
+		for {
+			select {
+			case <-stopChan:
+				return
+			case err = <-errChan:
+				log.Infof("error during port forwarder: %v", err)
+				close(stopChan)
+				return
+			}
+		}
+	}()
+
+	return stopChan, nil
+}
+
+func GetLoftDefaultPassword(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (string, error) {
+	loftNamespace, err := kubeClient.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
+	if err != nil {
+		if kerrors.IsNotFound(err) {
+			loftNamespace, err := kubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespace,
+				},
+			}, metav1.CreateOptions{})
+			if err != nil {
+				return "", err
+			}
+
+			return string(loftNamespace.UID), nil
+		}
+
+		return "", err
+	}
+
+	return string(loftNamespace.UID), nil
+}
+
+type version struct {
+	Version string `json:"version"`
+}
+
+func IsLoftReachable(ctx context.Context, host string) (bool, error) {
+	// wait until loft is reachable at the given url
+	client := &http.Client{
+		Transport: httputil.InsecureTransport(),
+	}
+	url := "https://" + host + "/version"
+
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return false, fmt.Errorf("error creating request with context: %w", err) + } + resp, err := client.Do(req) + if err == nil && resp.StatusCode == http.StatusOK { + out, err := io.ReadAll(resp.Body) + if err != nil { + return false, nil + } + + v := &version{} + err = json.Unmarshal(out, v) + if err != nil { + return false, fmt.Errorf("error decoding response from %s: %w. Try running '%s --reset'", url, err, product.StartCmd()) + } else if v.Version == "" { + return false, fmt.Errorf("unexpected response from %s: %s. Try running '%s --reset'", url, string(out), product.StartCmd()) + } + + return true, nil + } + + return false, nil +} + +func IsLocalCluster(host string, log log.Logger) bool { + url, err := url.Parse(host) + if err != nil { + log.Warnf("Couldn't parse kube context host url: %v", err) + return false + } + + hostname := url.Hostname() + ip := net.ParseIP(hostname) + if ip != nil { + if IsPrivateIP(ip) { + return true + } + } + + if hostname == "localhost" || strings.HasSuffix(hostname, ".internal") || strings.HasSuffix(hostname, ".localhost") { + return true + } + + return false +} + +var privateIPBlocks []*net.IPNet + +func init() { + for _, cidr := range []string{ + "127.0.0.0/8", // IPv4 loopback + "10.0.0.0/8", // RFC1918 + "172.16.0.0/12", // RFC1918 + "192.168.0.0/16", // RFC1918 + "::1/128", // IPv6 loopback + "fe80::/10", // IPv6 link-local + "fc00::/7", // IPv6 unique local addr + } { + _, block, _ := net.ParseCIDR(cidr) + privateIPBlocks = append(privateIPBlocks, block) + } +} + +// IsPrivateIP checks if a given ip is private +func IsPrivateIP(ip net.IP) bool { + for _, block := range privateIPBlocks { + if block.Contains(ip) { + return true + } + } + + return false +} + +func EnterHostNameQuestion(log log.Logger) (string, error) { + return log.Question(&survey.QuestionOptions{ + Question: fmt.Sprintf("Enter a hostname for your %s instance (e.g. loft.my-domain.tld): \n ", product.DisplayName()), + ValidationFunc: func(answer string) error { + u, err := url.Parse("https://" + answer) + if err != nil || u.Path != "" || u.Port() != "" || len(strings.Split(answer, ".")) < 2 { + return fmt.Errorf("please enter a valid hostname without protocol (https://), without path and without port, e.g. 
loft.my-domain.tld") + } + return nil + }, + }) +} + +func IsLoftAlreadyInstalled(ctx context.Context, kubeClient kubernetes.Interface, namespace string) (bool, error) { + _, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return false, nil + } + + return false, fmt.Errorf("error accessing kubernetes cluster: %w", err) + } + + return true, nil +} + +func UninstallLoft(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, kubeContext, namespace string, log log.Logger) error { + log.Infof("Uninstalling %s...", product.DisplayName()) + releaseName := defaultReleaseName + deploy, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, defaultDeploymentName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } else if deploy != nil && deploy.Labels != nil && deploy.Labels["release"] != "" { + releaseName = deploy.Labels["release"] + } + + args := []string{ + "uninstall", + releaseName, + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + log.Infof("Executing command: helm %s", strings.Join(args, " ")) + output, err := exec.Command("helm", args...).CombinedOutput() + if err != nil { + log.Errorf("error during helm command: %s (%v)", string(output), err) + } + + // we also cleanup the validating webhook configuration and apiservice + apiRegistrationClient, err := clientset.NewForConfig(restConfig) + if err != nil { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.management.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = deleteUser(ctx, restConfig, "admin") + if err != nil { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), "loft-user-secret-admin", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().Secrets(namespace).Delete(context.Background(), LoftRouterDomainSecret, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + // we also cleanup the validating webhook configuration and apiservice + err = kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(ctx, "loft-agent", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1alpha1.tenancy.kiosk.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = apiRegistrationClient.ApiregistrationV1().APIServices().Delete(ctx, "v1.cluster.loft.sh", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-agent-controller", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + err = kubeClient.CoreV1().ConfigMaps(namespace).Delete(ctx, "loft-applied-defaults", metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + log.WriteString(logrus.InfoLevel, "\n") + log.Done(product.Replace("Successfully uninstalled Loft")) + log.WriteString(logrus.InfoLevel, "\n") + + return nil +} + +func deleteUser(ctx context.Context, restConfig *rest.Config, name string) error { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return err + } + + user, err := 
loftClient.StorageV1().Users().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil + } else if len(user.Finalizers) > 0 { + user.Finalizers = nil + _, err = loftClient.StorageV1().Users().Update(ctx, user, metav1.UpdateOptions{}) + if err != nil { + if kerrors.IsConflict(err) { + return deleteUser(ctx, restConfig, name) + } + + return err + } + } + + err = loftClient.StorageV1().Users().Delete(ctx, name, metav1.DeleteOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return err + } + + return nil +} + +func EnsureIngressController(ctx context.Context, kubeClient kubernetes.Interface, kubeContext string, log log.Logger) error { + // first create an ingress controller + const ( + YesOption = "Yes" + NoOption = "No, I already have an ingress controller installed." + ) + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Ingress controller required. Should the nginx-ingress controller be installed?", + DefaultValue: YesOption, + Options: []string{ + YesOption, + NoOption, + }, + }) + if err != nil { + return err + } + + if answer == YesOption { + args := []string{ + "install", + "ingress-nginx", + "ingress-nginx", + "--repository-config=''", + "--repo", + "https://kubernetes.github.io/ingress-nginx", + "--kube-context", + kubeContext, + "--namespace", + "ingress-nginx", + "--create-namespace", + "--set-string", + "controller.config.hsts=false", + "--wait", + } + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for ingress controller deployment, this can take several minutes...") + helmCmd := exec.Command("helm", args...) + output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + list, err := kubeClient.CoreV1().Secrets("ingress-nginx").List(ctx, metav1.ListOptions{ + LabelSelector: "name=ingress-nginx,owner=helm,status=deployed", + }) + if err != nil { + return err + } + + if len(list.Items) == 1 { + secret := list.Items[0] + originalSecret := secret.DeepCopy() + secret.Labels["loft.sh/app"] = "true" + if secret.Annotations == nil { + secret.Annotations = map[string]string{} + } + + secret.Annotations["loft.sh/url"] = "https://kubernetes.github.io/ingress-nginx" + originalJSON, err := json.Marshal(originalSecret) + if err != nil { + return err + } + modifiedJSON, err := json.Marshal(secret) + if err != nil { + return err + } + data, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON) + if err != nil { + return err + } + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Patch(ctx, secret.Name, types.MergePatchType, data, metav1.PatchOptions{}) + if err != nil { + return err + } + } + + log.Done("Successfully installed ingress-nginx to your kubernetes cluster!") + } + + return nil +} + +func UpgradeLoft(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, log log.Logger) error { + // now we install loft + args := []string{ + "upgrade", + defaultReleaseName, + chartName, + "--install", + "--reuse-values", + "--create-namespace", + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + log.WriteString(logrus.InfoLevel, "\n") + log.Infof("Executing command: helm %s\n", strings.Join(args, " ")) + log.Info("Waiting for helm command, this can take up to several minutes...") + helmCmd := exec.Command("helm", args...) 
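`EnsureIngressController` above relabels the deployed Helm release secret by marshaling the original and modified objects and diffing them into a JSON merge patch, which is then sent to the API server with `types.MergePatchType`. A minimal sketch of that technique using the same `github.com/evanphx/json-patch` package this file imports; the label values here are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := map[string]any{
		"metadata": map[string]any{
			"labels": map[string]any{"owner": "helm"},
		},
	}
	modified := map[string]any{
		"metadata": map[string]any{
			"labels": map[string]any{"owner": "helm", "loft.sh/app": "true"},
		},
	}

	originalJSON, err := json.Marshal(original)
	if err != nil {
		panic(err)
	}
	modifiedJSON, err := json.Marshal(modified)
	if err != nil {
		panic(err)
	}

	// CreateMergePatch emits only the changed fields; the result is what
	// gets sent as a MergePatchType patch.
	patch, err := jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"metadata":{"labels":{"loft.sh/app":"true"}}}
}
```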
+ if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + + log.Donef("%s has been deployed to your cluster!", product.DisplayName()) + return nil +} + +func GetLoftManifests(chartName, chartRepo, kubeContext, namespace string, extraArgs []string, _ log.Logger) (string, error) { + args := []string{ + "template", + defaultReleaseName, + chartName, + "--repository-config=''", + "--kube-context", + kubeContext, + "--namespace", + namespace, + } + if chartRepo != "" { + args = append(args, "--repo", chartRepo) + } + args = append(args, extraArgs...) + + helmCmd := exec.Command("helm", args...) + if chartRepo != "" { + helmWorkDir, err := getHelmWorkdir(chartName) + if err != nil { + return "", err + } + + helmCmd.Dir = helmWorkDir + } + output, err := helmCmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("error during helm command: %s (%w)", string(output), err) + } + return string(output), nil +} + +// getHelmWorkdir returns the directory in which the `helm` commands should be executed, or an error if none can be found or created. +// Uses the current workdir by default unless it contains a folder with the chart name. +func getHelmWorkdir(chartName string) (string, error) { + // If chartName folder exists, check temp dir next + if _, err := os.Stat(chartName); err == nil { + tempDir := os.TempDir() + + // If tempDir/chartName folder exists, create temp folder + if _, err := os.Stat(path.Join(tempDir, chartName)); err == nil { + tempDir, err = os.MkdirTemp(tempDir, chartName) + if err != nil { + return "", errors.New("problematic directory `" + chartName + "` found: please execute command in a different folder") + } + } + + // Use tempDir + return tempDir, nil + } + + // Use current workdir + return "", nil +} + +// EnsureAdminPassword makes sure that the admin user and the password secret exist. +// Returns (true, nil) if everything is correct but the stored password differs from the `password` parameter. +func EnsureAdminPassword(ctx context.Context, kubeClient kubernetes.Interface, restConfig *rest.Config, password string, log log.Logger) (bool, error) { + loftClient, err := loftclientset.NewForConfig(restConfig) + if err != nil { + return false, err + } + + admin, err := loftClient.StorageV1().Users().Get(ctx, "admin", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if kerrors.IsNotFound(err) { // the typed client returns a non-nil empty object alongside an error, so test the error instead of admin == nil + admin, err = loftClient.StorageV1().Users().Create(ctx, &storagev1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admin", + }, + Spec: storagev1.UserSpec{ + Username: "admin", + Email: "test@domain.tld", + Subject: "admin", + Groups: []string{"system:masters"}, + PasswordRef: &storagev1.SecretRef{ + SecretName: "loft-user-secret-admin", + SecretNamespace: "loft", + Key: "password", + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } + } else if admin.Spec.PasswordRef == nil || admin.Spec.PasswordRef.SecretName == "" || admin.Spec.PasswordRef.SecretNamespace == "" { + return false, nil + } + + key := admin.Spec.PasswordRef.Key + if key == "" { + key = "password" + } + + passwordHash := fmt.Sprintf("%x", sha256.Sum256([]byte(password))) + + secret, err := kubeClient.CoreV1().Secrets(admin.Spec.PasswordRef.SecretNamespace).Get(ctx, admin.Spec.PasswordRef.SecretName, metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + return false, err + } else if err
== nil { + existingPasswordHash, keyExists := secret.Data[key] + if keyExists { + return (string(existingPasswordHash) != passwordHash), nil + } + + secret.Data[key] = []byte(passwordHash) + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}) + if err != nil { + return false, errors.Wrap(err, "update admin password secret") + } + return false, nil + } + + // create the password secret if it was not found, this can happen if you delete the loft namespace without deleting the admin user + secret = &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: admin.Spec.PasswordRef.SecretName, + Namespace: admin.Spec.PasswordRef.SecretNamespace, + }, + Data: map[string][]byte{ + key: []byte(passwordHash), + }, + } + _, err = kubeClient.CoreV1().Secrets(secret.Namespace).Create(ctx, secret, metav1.CreateOptions{}) + if err != nil { + return false, errors.Wrap(err, "create admin password secret") + } + + log.Info("Successfully recreated admin password secret") + return false, nil +} + +func IsLoftInstalledLocally(ctx context.Context, kubeClient kubernetes.Interface, namespace string) bool { + _, err := kubeClient.NetworkingV1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + _, err = kubeClient.NetworkingV1beta1().Ingresses(namespace).Get(ctx, "loft-ingress", metav1.GetOptions{}) + return kerrors.IsNotFound(err) + } + + return kerrors.IsNotFound(err) +} + +func getPortForwardingTargetPort(pod *corev1.Pod) int { + for _, container := range pod.Spec.Containers { + if container.Name == "manager" { + for _, port := range container.Ports { + if port.Name == "https" { + return int(port.ContainerPort) + } + } + } + } + + return 10443 +} diff --git a/pkg/platform/helper/helper.go b/pkg/platform/helper/helper.go new file mode 100644 index 000000000..189caa180 --- /dev/null +++ b/pkg/platform/helper/helper.go @@ -0,0 +1,1160 @@ +package helper + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + + "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset/scheme" + authorizationv1 "k8s.io/api/authorization/v1" + + clusterv1 "github.com/loft-sh/agentapi/v4/pkg/apis/loft/cluster/v1" + managementv1 "github.com/loft-sh/api/v4/pkg/apis/management/v1" + "github.com/loft-sh/log" + "github.com/loft-sh/log/survey" + "github.com/loft-sh/vcluster/pkg/platform" + "github.com/loft-sh/vcluster/pkg/platform/clihelper" + "github.com/loft-sh/vcluster/pkg/platform/kube" + "github.com/loft-sh/vcluster/pkg/platform/kubeconfig" + "github.com/loft-sh/vcluster/pkg/projectutil" + "github.com/mgutz/ansi" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubectl/pkg/util/term" +) + +var errNoClusterAccess = errors.New("the user has no access to any cluster") + +type VirtualClusterInstanceProject struct { + VirtualCluster *managementv1.VirtualClusterInstance + Project *managementv1.Project +} + +type SpaceInstanceProject struct { + SpaceInstance *managementv1.SpaceInstance + Project *managementv1.Project +} + +func SelectVirtualClusterTemplate(ctx context.Context, baseClient platform.Client, projectName, templateName string, log log.Logger) (*managementv1.VirtualClusterTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err 
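An aside before the template-selection logic continues: EnsureAdminPassword above compares the stored secret value against a hex-encoded SHA-256 digest of the supplied password. A standalone sketch of just that hashing step:

package main

import (
    "crypto/sha256"
    "fmt"
)

// hashPassword mirrors the digest format written into the admin secret:
// a lowercase hex encoding of the SHA-256 sum of the raw password bytes.
func hashPassword(password string) string {
    return fmt.Sprintf("%x", sha256.Sum256([]byte(password)))
}

func main() {
    // Prints 5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8
    fmt.Println(hashPassword("password"))
}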
+ } + + // select default template + if templateName == "" && projectTemplates.DefaultVirtualClusterTemplate != "" { + templateName = projectTemplates.DefaultVirtualClusterTemplate + } + + // try to find template + if templateName != "" { + for _, virtualClusterTemplate := range projectTemplates.VirtualClusterTemplates { + if virtualClusterTemplate.Name == templateName { + return &virtualClusterTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed virtual cluster templates in project %s", projectName) + } else if len(projectTemplates.VirtualClusterTemplates) == 1 { + return &projectTemplates.VirtualClusterTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.VirtualClusterTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.VirtualClusterTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectSpaceTemplate(ctx context.Context, baseClient platform.Client, projectName, templateName string, log log.Logger) (*managementv1.SpaceTemplate, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectTemplates, err := managementClient.Loft().ManagementV1().Projects().ListTemplates(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // select default template + if templateName == "" && projectTemplates.DefaultSpaceTemplate != "" { + templateName = projectTemplates.DefaultSpaceTemplate + } + + // try to find template + if templateName != "" { + for _, spaceTemplate := range projectTemplates.SpaceTemplates { + if spaceTemplate.Name == templateName { + return &spaceTemplate, nil + } + } + + return nil, fmt.Errorf("couldn't find template %s as allowed template in project %s", templateName, projectName) + } else if len(projectTemplates.SpaceTemplates) == 0 { + return nil, fmt.Errorf("there are no allowed space templates in project %s", projectName) + } else if len(projectTemplates.SpaceTemplates) == 1 { + return &projectTemplates.SpaceTemplates[0], nil + } + + templateNames := []string{} + for _, template := range projectTemplates.SpaceTemplates { + templateNames = append(templateNames, clihelper.GetDisplayName(template.Name, template.Spec.DisplayName)) + } + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a template to use", + DefaultValue: templateNames[0], + Options: templateNames, + }) + if err != nil { + return nil, err + } + for _, template := range projectTemplates.SpaceTemplates { + if answer == clihelper.GetDisplayName(template.Name, template.Spec.DisplayName) { + return &template, nil + } + } + + return nil, fmt.Errorf("answer not found") +} + +func SelectVirtualClusterInstanceOrVirtualCluster(ctx context.Context, baseClient platform.Client, virtualClusterName, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, string, error) { + if clusterName != "" || spaceName 
!= "" { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", "", err + } + + // gather projects and virtual cluster instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + virtualCluster, space, cluster, err := SelectVirtualClusterAndSpaceAndClusterName(ctx, baseClient, virtualClusterName, spaceName, clusterName, log) + return cluster, "", space, virtualCluster, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var virtualClusters []*VirtualClusterInstanceProject + for _, p := range projects { + if virtualClusterName != "" { + virtualClusterInstance, err := getProjectVirtualClusterInstance(ctx, managementClient, p, virtualClusterName) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, virtualClusterInstance) + } else { + projectVirtualClusters, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + continue + } + + virtualClusters = append(virtualClusters, projectVirtualClusters...) 
+ } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, virtualCluster := range virtualClusters { + optionsUnformatted = append(optionsUnformatted, []string{"vcluster: " + clihelper.GetDisplayName(virtualCluster.VirtualCluster.Name, virtualCluster.VirtualCluster.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(virtualCluster.Project.Name, virtualCluster.Project.Spec.DisplayName)}) + } + + // check if there are virtualclusters + if len(virtualClusters) == 0 { + if virtualClusterName != "" { + return "", "", "", "", fmt.Errorf("couldn't find or access virtual cluster %s", virtualClusterName) + } + return "", "", "", "", fmt.Errorf("couldn't find a virtual cluster you have access to") + } else if len(virtualClusters) == 1 { + return "", virtualClusters[0].Project.Name, "", virtualClusters[0].VirtualCluster.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return "", virtualClusters[idx].Project.Name, "", virtualClusters[idx].VirtualCluster.Name, nil + } + } + + return "", "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectSpaceInstanceOrSpace(ctx context.Context, baseClient platform.Client, spaceName, projectName, clusterName string, log log.Logger) (string, string, string, error) { + if clusterName != "" { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", "", err + } + + // gather projects and space instances to access + var projects []*managementv1.Project + if projectName != "" { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + if kerrors.IsNotFound(err) { + return "", "", "", fmt.Errorf("couldn't find or access project %s", projectName) + } + + return "", "", "", err + } + + projects = append(projects, project) + } else { + projectsList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil || len(projectsList.Items) == 0 { + space, cluster, err := SelectSpaceAndClusterName(ctx, baseClient, spaceName, clusterName, log) + return cluster, "", space, err + } + + for _, p := range projectsList.Items { + proj := p + projects = append(projects, &proj) + } + } + + // gather space instances in those projects + var spaces []*SpaceInstanceProject + for _, p := range projects { + if spaceName != "" { + spaceInstance, err := getProjectSpaceInstance(ctx, managementClient, p, spaceName) + if err != nil { + continue + } + + spaces = append(spaces, spaceInstance) + } else { + projectSpaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + continue + } + + spaces = append(spaces, projectSpaceInstances...) 
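The prompt-then-match flow that follows (format the display strings, ask once, map the chosen string back to its slice position) recurs in almost every selection helper in this file. Isolated as a sketch: `pickIndex` is a hypothetical helper, and it assumes only the `log.Question(&survey.QuestionOptions{...})` call pattern already used above.

package main

import (
    "fmt"

    "github.com/loft-sh/log"
    "github.com/loft-sh/log/survey"
)

// pickIndex shows one formatted option per slice entry and returns the
// index of whichever the user selected.
func pickIndex(logger log.Logger, options []string) (int, error) {
    answer, err := logger.Question(&survey.QuestionOptions{
        Question:     "Please choose an option",
        DefaultValue: options[0],
        Options:      options,
    })
    if err != nil {
        return -1, err
    }

    // map the answer string back to its position
    for idx, s := range options {
        if s == answer {
            return idx, nil
        }
    }

    return -1, fmt.Errorf("answer not found")
}

func main() {
    idx, err := pickIndex(log.GetInstance(), []string{"Space: dev | Project: demo", "Space: prod | Project: demo"})
    if err != nil {
        panic(err)
    }
    fmt.Println("selected index:", idx)
}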
+ } + } + + // get unformatted options + var optionsUnformatted [][]string + for _, space := range spaces { + optionsUnformatted = append(optionsUnformatted, []string{"Space: " + clihelper.GetDisplayName(space.SpaceInstance.Name, space.SpaceInstance.Spec.DisplayName), "Project: " + clihelper.GetDisplayName(space.Project.Name, space.Project.Spec.DisplayName)}) + } + + // check if there are spaces + if len(spaces) == 0 { + if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find or access space %s", spaceName) + } + return "", "", "", fmt.Errorf("couldn't find a space you have access to") + } else if len(spaces) == 1 { + return spaces[0].SpaceInstance.Spec.ClusterRef.Cluster, spaces[0].Project.Name, spaces[0].SpaceInstance.Name, nil + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + selectedOption, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedOption { + return spaces[idx].SpaceInstance.Spec.ClusterRef.Cluster, spaces[idx].Project.Name, spaces[idx].SpaceInstance.Name, nil + } + } + + return "", "", "", fmt.Errorf("couldn't find answer") +} + +func SelectProjectOrCluster(ctx context.Context, baseClient platform.Client, clusterName, projectName string, allowClusterOnly bool, log log.Logger) (cluster string, project string, err error) { + if projectName != "" { + return clusterName, projectName, nil + } else if allowClusterOnly && clusterName != "" { + return clusterName, "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", "", err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", "", err + } + + projectNames := []string{} + for _, project := range projectList.Items { + projectNames = append(projectNames, clihelper.GetDisplayName(project.Name, project.Spec.DisplayName)) + } + + if len(projectNames) == 0 { + cluster, err := SelectCluster(ctx, baseClient, log) + if err != nil { + if errors.Is(err, errNoClusterAccess) { + return "", "", fmt.Errorf("the user has no access to a project") + } + + return "", "", err + } + + return cluster, "", nil + } + + var selectedProject *managementv1.Project + if len(projectNames) == 1 { + selectedProject = &projectList.Items[0] + } else { + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a project to use", + DefaultValue: projectNames[0], + Options: projectNames, + }) + if err != nil { + return "", "", err + } + for idx, project := range projectList.Items { + if answer == clihelper.GetDisplayName(project.Name, project.Spec.DisplayName) { + selectedProject = &projectList.Items[idx] + } + } + if selectedProject == nil { + return "", "", fmt.Errorf("answer not found") + } + } + + if clusterName == "" { + clusterName, err = SelectProjectCluster(ctx, baseClient, selectedProject, log) + return clusterName, selectedProject.Name, err + } + + return clusterName, selectedProject.Name, nil +} + +// SelectCluster lets the user select a cluster +func SelectCluster(ctx context.Context, baseClient platform.Client, log log.Logger) (string, error) { + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return "", 
err + } + + clusterNames := []string{} + for _, cluster := range clusterList.Items { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Items) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Items) == 1 { + return clusterList.Items[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + for _, cluster := range clusterList.Items { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectProjectCluster lets the user select a cluster from the project's allowed clusters +func SelectProjectCluster(ctx context.Context, baseClient platform.Client, project *managementv1.Project, log log.Logger) (string, error) { + if !term.IsTerminal(os.Stdin) { + // Allow loft to schedule as before + return "", nil + } + + managementClient, err := baseClient.Management() + if err != nil { + return "", err + } + + clusterList, err := managementClient.Loft().ManagementV1().Projects().ListClusters(ctx, project.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + + anyClusterOption := "Any Cluster [Loft Selects Cluster]" + clusterNames := []string{} + for _, allowedCluster := range project.Spec.AllowedClusters { + if allowedCluster.Name == "*" { + clusterNames = append(clusterNames, anyClusterOption) + break + } + } + + for _, cluster := range clusterList.Clusters { + clusterNames = append(clusterNames, clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName)) + } + + if len(clusterList.Clusters) == 0 { + return "", errNoClusterAccess + } else if len(clusterList.Clusters) == 1 { + return clusterList.Clusters[0].Name, nil + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a cluster to use", + DefaultValue: clusterNames[0], + Options: clusterNames, + }) + if err != nil { + return "", err + } + + if answer == anyClusterOption { + return "", nil + } + + for _, cluster := range clusterList.Clusters { + if answer == clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName) { + return cluster.Name, nil + } + } + return "", fmt.Errorf("answer not found") +} + +// SelectUserOrTeam lets the user select an user or team in a cluster +func SelectUserOrTeam(ctx context.Context, baseClient platform.Client, clusterName string, log log.Logger) (*clusterv1.EntityInfo, *clusterv1.EntityInfo, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, nil, err + } + + clusterAccess, err := managementClient.Loft().ManagementV1().Clusters().ListAccess(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + + var user *clusterv1.EntityInfo + if len(clusterAccess.Users) > 0 { + user = &clusterAccess.Users[0].Info + } + + teams := []*clusterv1.EntityInfo{} + for _, team := range clusterAccess.Teams { + t := team + teams = append(teams, &t.Info) + } + + if user == nil && len(teams) == 0 { + return nil, nil, fmt.Errorf("the user has no access to cluster %s", clusterName) + } else if user != nil && len(teams) == 0 { + return user, nil, nil + } else if user == nil && len(teams) == 1 { + return nil, teams[0], nil + } + + names := []string{} + if user != nil { + names = append(names, "User "+clihelper.DisplayName(user)) + } + for _, t := 
range teams { + names = append(names, "Team "+clihelper.DisplayName(t)) + } + + answer, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team to use", + DefaultValue: names[0], + Options: names, + }) + if err != nil { + return nil, nil, err + } + + if user != nil && "User "+clihelper.DisplayName(user) == answer { + return user, nil, nil + } + for _, t := range teams { + if "Team "+clihelper.DisplayName(t) == answer { + return nil, t, nil + } + } + + return nil, nil, fmt.Errorf("answer not found") +} + +type ClusterUserOrTeam struct { + Team bool + ClusterMember managementv1.ClusterMember +} + +func SelectClusterUserOrTeam(ctx context.Context, baseClient platform.Client, clusterName, userName, teamName string, log log.Logger) (*ClusterUserOrTeam, error) { + if userName != "" && teamName != "" { + return nil, fmt.Errorf("team and user specified, please only choose one") + } + + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + members, err := managementClient.Loft().ManagementV1().Clusters().ListMembers(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("retrieve cluster members: %w", err) + } + + matchedMembers := []ClusterUserOrTeam{} + optionsUnformatted := [][]string{} + for _, user := range members.Users { + if teamName != "" { + continue + } else if userName != "" && user.Info.Name != userName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + ClusterMember: user, + }) + displayName := user.Info.DisplayName + if displayName == "" { + displayName = user.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"User: " + displayName, "Kube User: " + user.Info.Name}) + } + for _, team := range members.Teams { + if userName != "" { + continue + } else if teamName != "" && team.Info.Name != teamName { + continue + } + + matchedMembers = append(matchedMembers, ClusterUserOrTeam{ + Team: true, + ClusterMember: team, + }) + displayName := team.Info.DisplayName + if displayName == "" { + displayName = team.Info.Name + } + + optionsUnformatted = append(optionsUnformatted, []string{"Team: " + displayName, "Kube Team: " + team.Info.Name}) + } + + questionOptions := formatOptions("%s | %s", optionsUnformatted) + if len(questionOptions) == 0 { + if userName == "" && teamName == "" { + return nil, fmt.Errorf("couldn't find any space") + } else if userName != "" { + return nil, fmt.Errorf("couldn't find user %s in cluster %s", ansi.Color(userName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return nil, fmt.Errorf("couldn't find team %s in cluster %s", ansi.Color(teamName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if len(questionOptions) == 1 { + return &matchedMembers[0], nil + } + + selectedMember, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a user or team", + DefaultValue: questionOptions[0], + Options: questionOptions, + }) + if err != nil { + return nil, err + } + + for idx, s := range questionOptions { + if s == selectedMember { + return &matchedMembers[idx], nil + } + } + + return nil, fmt.Errorf("selected question option not found") +} + +func GetVirtualClusterInstances(ctx context.Context, baseClient platform.Client) ([]*VirtualClusterInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, 
err + } + + var retVClusters []*VirtualClusterInstanceProject + for _, project := range projectList.Items { + p := &project + + virtualClusterInstances, err := getProjectVirtualClusterInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retVClusters = append(retVClusters, virtualClusterInstances...) + } + + return retVClusters, nil +} + +func CanAccessProjectSecret(ctx context.Context, managementClient kube.Interface, namespace, name string) (bool, error) { + return CanAccessInstance(ctx, managementClient, namespace, name, "projectsecrets") +} + +func CanAccessInstance(ctx context.Context, managementClient kube.Interface, namespace, name string, resource string) (bool, error) { + selfSubjectAccessReview, err := managementClient.Loft().ManagementV1().SelfSubjectAccessReviews().Create(ctx, &managementv1.SelfSubjectAccessReview{ + Spec: managementv1.SelfSubjectAccessReviewSpec{ + SelfSubjectAccessReviewSpec: authorizationv1.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationv1.ResourceAttributes{ + Verb: "use", + Group: managementv1.SchemeGroupVersion.Group, + Version: managementv1.SchemeGroupVersion.Version, + Resource: resource, + Namespace: namespace, + Name: name, + }, + }, + }, + }, metav1.CreateOptions{}) + if err != nil { + return false, err + } else if !selfSubjectAccessReview.Status.Allowed || selfSubjectAccessReview.Status.Denied { + return false, nil + } + return true, nil +} + +func GetSpaceInstances(ctx context.Context, baseClient platform.Client) ([]*SpaceInstanceProject, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + var retSpaces []*SpaceInstanceProject + for _, project := range projectList.Items { + p := &project + + spaceInstances, err := getProjectSpaceInstances(ctx, managementClient, p) + if err != nil { + return nil, err + } + + retSpaces = append(retSpaces, spaceInstances...) 
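Before the listing helpers continue below, a note on CanAccessInstance above: it wraps the platform's own SelfSubjectAccessReview resource, but the shape of the probe is the same as the plain Kubernetes one. A sketch against the core authorization API with client-go (not the platform call, and the "list pods" attributes are only illustrative):

package main

import (
    "context"
    "fmt"

    authorizationv1 "k8s.io/api/authorization/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // load the current kubeconfig context
    restConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{},
    ).ClientConfig()
    if err != nil {
        panic(err)
    }

    kubeClient, err := kubernetes.NewForConfig(restConfig)
    if err != nil {
        panic(err)
    }

    // ask the API server whether the current identity may list pods
    review, err := kubeClient.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), &authorizationv1.SelfSubjectAccessReview{
        Spec: authorizationv1.SelfSubjectAccessReviewSpec{
            ResourceAttributes: &authorizationv1.ResourceAttributes{
                Verb:     "list",
                Resource: "pods",
            },
        },
    }, metav1.CreateOptions{})
    if err != nil {
        panic(err)
    }

    // mirrors the allowed/denied check in CanAccessInstance
    fmt.Println("allowed:", review.Status.Allowed && !review.Status.Denied)
}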
+ } + + return retSpaces, nil +} + +type ProjectProjectSecret struct { + ProjectSecret managementv1.ProjectSecret + Project string +} + +func GetProjectSecrets(ctx context.Context, managementClient kube.Interface, projectNames ...string) ([]*ProjectProjectSecret, error) { + var projects []*managementv1.Project + if len(projectNames) == 0 { + projectList, err := managementClient.Loft().ManagementV1().Projects().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for idx := range projectList.Items { + projectItem := projectList.Items[idx] + projects = append(projects, &projectItem) + } + } else { + for _, projectName := range projectNames { + project, err := managementClient.Loft().ManagementV1().Projects().Get(ctx, projectName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + projects = append(projects, project) + } + } + + var retSecrets []*ProjectProjectSecret + for _, project := range projects { + projectSecrets, err := managementClient.Loft().ManagementV1().ProjectSecrets(projectutil.ProjectNamespace(project.Name)).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + for _, projectSecret := range projectSecrets.Items { + canAccess, err := CanAccessProjectSecret(ctx, managementClient, projectSecret.Namespace, projectSecret.Name) + if err != nil { + return nil, err + } else if !canAccess { + continue + } + + retSecrets = append(retSecrets, &ProjectProjectSecret{ + ProjectSecret: projectSecret, + Project: project.Name, + }) + } + } + + return retSecrets, nil +} + +type ClusterSpace struct { + clusterv1.Space + Cluster string +} + +// GetSpaces returns all spaces accessible by the user or team +func GetSpaces(ctx context.Context, baseClient platform.Client, log log.Logger) ([]ClusterSpace, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + spaceList := []ClusterSpace{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + spaces, err := clusterClient.Agent().ClusterV1().Spaces().List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving spaces from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, space := range spaces.Items { + spaceList = append(spaceList, ClusterSpace{ + Space: space, + Cluster: cluster.Name, + }) + } + } + sort.Slice(spaceList, func(i, j int) bool { + return spaceList[i].Name < spaceList[j].Name + }) + + return spaceList, nil +} + +type ClusterVirtualCluster struct { + clusterv1.VirtualCluster + Cluster string +} + +// GetVirtualClusters returns all virtual clusters the user has access to +func GetVirtualClusters(ctx context.Context, baseClient platform.Client, log log.Logger) ([]ClusterVirtualCluster, error) { + managementClient, err := baseClient.Management() + if err != nil { + return nil, err + } + + clusterList, err := managementClient.Loft().ManagementV1().Clusters().List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + virtualClusterList := []ClusterVirtualCluster{} + for _, cluster := range clusterList.Items { + clusterClient, err := baseClient.Cluster(cluster.Name) + if err != nil { + return nil, err + } + + virtualClusters, err := 
clusterClient.Agent().ClusterV1().VirtualClusters("").List(ctx, metav1.ListOptions{}) + if err != nil { + if kerrors.IsForbidden(err) { + continue + } + + log.Warnf("Error retrieving virtual clusters from cluster %s: %v", clihelper.GetDisplayName(cluster.Name, cluster.Spec.DisplayName), err) + continue + } + + for _, virtualCluster := range virtualClusters.Items { + virtualClusterList = append(virtualClusterList, ClusterVirtualCluster{ + VirtualCluster: virtualCluster, + Cluster: cluster.Name, + }) + } + } + sort.Slice(virtualClusterList, func(i, j int) bool { + return virtualClusterList[i].Name < virtualClusterList[j].Name + }) + + return virtualClusterList, nil +} + +// SelectSpaceAndClusterName selects a space and cluster name +func SelectSpaceAndClusterName(ctx context.Context, baseClient platform.Client, spaceName, clusterName string, log log.Logger) (string, string, error) { + spaces, err := GetSpaces(ctx, baseClient, log) + if err != nil { + return "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedSpaces := []ClusterSpace{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, space := range spaces { + if spaceName != "" && space.Space.Name != spaceName { + continue + } else if clusterName != "" && space.Cluster != clusterName { + continue + } else if len(matchedSpaces) > 20 { + break + } + + if isLoftContext && vCluster == "" && cluster == space.Cluster && namespace == space.Space.Name { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedSpaces = append(matchedSpaces, space) + spaceName := space.Space.Name + if space.Space.Annotations != nil && space.Space.Annotations["loft.sh/display-name"] != "" { + spaceName = space.Space.Annotations["loft.sh/display-name"] + " (" + spaceName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{spaceName, space.Cluster}) + } + + questionOptions := formatOptions("Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if spaceName == "" { + return "", "", fmt.Errorf("couldn't find any space") + } else if clusterName != "" { + return "", "", fmt.Errorf("couldn't find space %s in cluster %s", ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", fmt.Errorf("couldn't find space %s", ansi.Color(spaceName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedSpaces[0].Space.Name, matchedSpaces[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a space", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedSpaces[idx].Cluster + spaceName = matchedSpaces[idx].Space.Name + break + } + } + + return spaceName, clusterName, nil +} + +func GetCurrentUser(ctx context.Context, managementClient kube.Interface) (*managementv1.UserInfo, *clusterv1.EntityInfo, error) { + self, err := managementClient.Loft().ManagementV1().Selves().Create(ctx, &managementv1.Self{}, metav1.CreateOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("get self: %w", err) + } else if self.Status.User == nil && self.Status.Team == nil { + return nil, nil, fmt.Errorf("no user or team name returned") + } + + 
return self.Status.User, self.Status.Team, nil +} + +func SelectVirtualClusterAndSpaceAndClusterName(ctx context.Context, baseClient platform.Client, virtualClusterName, spaceName, clusterName string, log log.Logger) (string, string, string, error) { + virtualClusters, err := GetVirtualClusters(ctx, baseClient, log) + if err != nil { + return "", "", "", err + } + + currentContext, err := kubeconfig.CurrentContext() + if err != nil { + return "", "", "", fmt.Errorf("loading kubernetes config: %w", err) + } + + isLoftContext, cluster, namespace, vCluster := kubeconfig.ParseContext(currentContext) + matchedVClusters := []ClusterVirtualCluster{} + questionOptionsUnformatted := [][]string{} + defaultIndex := 0 + for _, virtualCluster := range virtualClusters { + if virtualClusterName != "" && virtualCluster.VirtualCluster.Name != virtualClusterName { + continue + } else if spaceName != "" && virtualCluster.VirtualCluster.Namespace != spaceName { + continue + } else if clusterName != "" && virtualCluster.Cluster != clusterName { + continue + } + + if isLoftContext && vCluster == virtualCluster.VirtualCluster.Name && cluster == virtualCluster.Cluster && namespace == virtualCluster.VirtualCluster.Namespace { + defaultIndex = len(questionOptionsUnformatted) + } + + matchedVClusters = append(matchedVClusters, virtualCluster) + vClusterName := virtualCluster.VirtualCluster.Name + if virtualCluster.VirtualCluster.Annotations != nil && virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] != "" { + vClusterName = virtualCluster.VirtualCluster.Annotations["loft.sh/display-name"] + " (" + vClusterName + ")" + } + + questionOptionsUnformatted = append(questionOptionsUnformatted, []string{vClusterName, virtualCluster.VirtualCluster.Namespace, virtualCluster.Cluster}) + } + + questionOptions := formatOptions("vCluster: %s | Space: %s | Cluster: %s", questionOptionsUnformatted) + if len(questionOptions) == 0 { + if virtualClusterName == "" { + return "", "", "", fmt.Errorf("couldn't find any virtual cluster") + } else if spaceName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } else if clusterName != "" { + return "", "", "", fmt.Errorf("couldn't find virtualcluster %s in space %s in cluster %s", ansi.Color(virtualClusterName, "white+b"), ansi.Color(spaceName, "white+b"), ansi.Color(clusterName, "white+b")) + } + + return "", "", "", fmt.Errorf("couldn't find virtual cluster %s", ansi.Color(virtualClusterName, "white+b")) + } else if len(questionOptions) == 1 { + return matchedVClusters[0].VirtualCluster.Name, matchedVClusters[0].VirtualCluster.Namespace, matchedVClusters[0].Cluster, nil + } + + selectedSpace, err := log.Question(&survey.QuestionOptions{ + Question: "Please choose a virtual cluster to use", + DefaultValue: questionOptions[defaultIndex], + Options: questionOptions, + }) + if err != nil { + return "", "", "", err + } + + for idx, s := range questionOptions { + if s == selectedSpace { + clusterName = matchedVClusters[idx].Cluster + virtualClusterName = matchedVClusters[idx].VirtualCluster.Name + spaceName = matchedVClusters[idx].VirtualCluster.Namespace + break + } + } + + return virtualClusterName, spaceName, clusterName, nil +} + +func formatOptions(format string, options [][]string) []string { + if len(options) == 0 { + return []string{} + } + + columnLengths := make([]int, len(options[0])) + for _, row := range 
options { + for i, column := range row { + if len(column) > columnLengths[i] { + columnLengths[i] = len(column) + } + } + } + + retOptions := []string{} + for _, row := range options { + columns := []interface{}{} + for i := range row { + value := row[i] + if columnLengths[i] > len(value) { + value = value + strings.Repeat(" ", columnLengths[i]-len(value)) + } + + columns = append(columns, value) + } + + retOptions = append(retOptions, fmt.Sprintf(format, columns...)) + } + + return retOptions +} + +func getProjectSpaceInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, spaceName string) (*SpaceInstanceProject, error) { + spaceInstance := &managementv1.SpaceInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(projectutil.ProjectNamespace(project.Name)). + Name(spaceName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstance) + if err != nil { + return nil, err + } + + if !spaceInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &SpaceInstanceProject{ + SpaceInstance: spaceInstance, + Project: project, + }, nil +} + +func getProjectSpaceInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*SpaceInstanceProject, error) { + spaceInstanceList := &managementv1.SpaceInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("spaceinstances"). + Namespace(projectutil.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(spaceInstanceList) + if err != nil { + return nil, err + } + + var spaces []*SpaceInstanceProject + for _, spaceInstance := range spaceInstanceList.Items { + if !spaceInstance.Status.CanUse { + continue + } + + s := spaceInstance + spaces = append(spaces, &SpaceInstanceProject{ + SpaceInstance: &s, + Project: project, + }) + } + return spaces, nil +} + +func getProjectVirtualClusterInstance(ctx context.Context, managementClient kube.Interface, project *managementv1.Project, virtualClusterName string) (*VirtualClusterInstanceProject, error) { + virtualClusterInstance := &managementv1.VirtualClusterInstance{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(projectutil.ProjectNamespace(project.Name)). + Name(virtualClusterName). + VersionedParams(&metav1.GetOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). + Into(virtualClusterInstance) + if err != nil { + return nil, err + } + + if !virtualClusterInstance.Status.CanUse { + return nil, fmt.Errorf("no use access") + } + + return &VirtualClusterInstanceProject{ + VirtualCluster: virtualClusterInstance, + Project: project, + }, nil +} + +func getProjectVirtualClusterInstances(ctx context.Context, managementClient kube.Interface, project *managementv1.Project) ([]*VirtualClusterInstanceProject, error) { + virtualClusterInstanceList := &managementv1.VirtualClusterInstanceList{} + err := managementClient.Loft().ManagementV1().RESTClient(). + Get(). + Resource("virtualclusterinstances"). + Namespace(projectutil.ProjectNamespace(project.Name)). + VersionedParams(&metav1.ListOptions{}, scheme.ParameterCodec). + Param("extended", "true"). + Do(ctx). 
+ Into(virtualClusterInstanceList) + if err != nil { + return nil, err + } + + var virtualClusters []*VirtualClusterInstanceProject + for _, virtualClusterInstance := range virtualClusterInstanceList.Items { + if !virtualClusterInstance.Status.CanUse { + continue + } + + v := virtualClusterInstance + virtualClusters = append(virtualClusters, &VirtualClusterInstanceProject{ + VirtualCluster: &v, + Project: project, + }) + } + return virtualClusters, nil +} diff --git a/pkg/platform/kube/client.go b/pkg/platform/kube/client.go new file mode 100644 index 000000000..21699183b --- /dev/null +++ b/pkg/platform/kube/client.go @@ -0,0 +1,54 @@ +package kube + +import ( + agentloftclient "github.com/loft-sh/agentapi/v4/pkg/client/loft/clientset_generated/clientset" + loftclient "github.com/loft-sh/api/v4/pkg/client/clientset_generated/clientset" + + "github.com/pkg/errors" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +type Interface interface { + kubernetes.Interface + Loft() loftclient.Interface + Agent() agentloftclient.Interface +} + +func NewForConfig(c *rest.Config) (Interface, error) { + kubeClient, err := kubernetes.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kube client") + } + + loftClient, err := loftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create loft client") + } + + agentLoftClient, err := agentloftclient.NewForConfig(c) + if err != nil { + return nil, errors.Wrap(err, "create kiosk client") + } + + return &client{ + Interface: kubeClient, + loftClient: loftClient, + agentLoftClient: agentLoftClient, + }, nil +} + +type client struct { + kubernetes.Interface + loftClient loftclient.Interface + agentLoftClient agentloftclient.Interface +} + +func (c *client) Loft() loftclient.Interface { + return c.loftClient +} + +func (c *client) Agent() agentloftclient.Interface { + return c.agentLoftClient +} diff --git a/pkg/platform/kubeconfig/kubeconfig.go b/pkg/platform/kubeconfig/kubeconfig.go new file mode 100644 index 000000000..602ecf63e --- /dev/null +++ b/pkg/platform/kubeconfig/kubeconfig.go @@ -0,0 +1,266 @@ +package kubeconfig + +import ( + "io" + "os" + "path/filepath" + "strings" + + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +type ContextOptions struct { + Name string + Server string + CaData []byte + ConfigPath string + InsecureSkipTLSVerify bool + DirectClusterEndpointEnabled bool + VirtualClusterAccessPointEnabled bool + + Token string + ClientKeyData []byte + ClientCertificateData []byte + + CurrentNamespace string + SetActive bool +} + +func SpaceInstanceContextName(projectName, spaceInstanceName string) string { + return "loft_" + spaceInstanceName + "_" + projectName +} + +func VirtualClusterInstanceContextName(projectName, virtualClusterInstance string) string { + return "loft-vcluster_" + virtualClusterInstance + "_" + projectName +} + +func virtualClusterInstanceProjectAndNameFromContextName(contextName string) (string, string) { + return strings.Split(contextName, "_")[2], strings.Split(contextName, "_")[1] +} + +func SpaceContextName(clusterName, namespaceName string) string { + contextName := "loft_" + if namespaceName != "" { + contextName += namespaceName + "_" + } + + contextName += clusterName + return contextName +} + +func VirtualClusterContextName(clusterName, namespaceName, virtualClusterName string) string { + return "loft-vcluster_" + virtualClusterName + "_" + 
namespaceName + "_" + clusterName +} + +func ManagementContextName() string { + return "loft-management" +} + +func ParseContext(contextName string) (isLoftContext bool, cluster string, namespace string, vCluster string) { + splitted := strings.Split(contextName, "_") + if len(splitted) == 0 || (splitted[0] != "loft" && splitted[0] != "loft-vcluster") { + return false, "", "", "" + } + + // cluster or space context + if splitted[0] == "loft" { + if len(splitted) > 3 || len(splitted) == 1 { + return false, "", "", "" + } else if len(splitted) == 2 { + return true, splitted[1], "", "" + } + + return true, splitted[2], splitted[1], "" + } + + // vCluster context + if len(splitted) != 4 { + return false, "", "", "" + } + + return true, splitted[3], splitted[2], splitted[1] +} + +func CurrentContext() (string, error) { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return "", err + } + + return config.CurrentContext, nil +} + +// DeleteContext deletes the context with the given name from the kube config +func DeleteContext(contextName string) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + delete(config.Contexts, contextName) + delete(config.Clusters, contextName) + delete(config.AuthInfos, contextName) + + if config.CurrentContext == contextName { + config.CurrentContext = "" + for name := range config.Contexts { + config.CurrentContext = name + break + } + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func updateKubeConfig(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, setActive bool) error { + config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{}).RawConfig() + if err != nil { + return err + } + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + + config.Contexts[contextName] = context + if setActive { + config.CurrentContext = contextName + } + + // Save the config + return clientcmd.ModifyConfig(clientcmd.NewDefaultClientConfigLoadingRules(), config, false) +} + +func printKubeConfigTo(contextName string, cluster *api.Cluster, authInfo *api.AuthInfo, namespaceName string, writer io.Writer) error { + config := api.NewConfig() + + config.Clusters[contextName] = cluster + config.AuthInfos[contextName] = authInfo + + // Update kube context + context := api.NewContext() + context.Cluster = contextName + context.AuthInfo = contextName + context.Namespace = namespaceName + + config.Contexts[contextName] = context + config.CurrentContext = contextName + + // set kind & version + config.APIVersion = "v1" + config.Kind = "Config" + + out, err := clientcmd.Write(*config) + if err != nil { + return err + } + + _, err = writer.Write(out) + return err +} + +// UpdateKubeConfig updates the kube config and adds the virtual cluster context +func UpdateKubeConfig(options ContextOptions) error { + contextName, cluster, authInfo, err := createContext(options) + if err != nil { + return err + } + + // we don't want to 
set the space name here as the default namespace in the virtual cluster, because it might not exist + return updateKubeConfig(contextName, cluster, authInfo, options.CurrentNamespace, options.SetActive) +} + +// PrintKubeConfigTo prints the given config to the writer +func PrintKubeConfigTo(options ContextOptions, writer io.Writer) error { + contextName, cluster, authInfo, err := createContext(options) + if err != nil { + return err + } + + // we don't want to set the space name here as the default namespace in the virtual cluster, because it might not exist + return printKubeConfigTo(contextName, cluster, authInfo, options.CurrentNamespace, writer) +} + +// PrintTokenKubeConfig writes the kube config to os.Stdout +func PrintTokenKubeConfig(restConfig *rest.Config, token string) error { + contextName, cluster, authInfo := createTokenContext(restConfig, token) + + return printKubeConfigTo(contextName, cluster, authInfo, "", os.Stdout) +} + +// WriteTokenKubeConfig writes the kube config to the io.Writer +func WriteTokenKubeConfig(restConfig *rest.Config, token string, w io.Writer) error { + contextName, cluster, authInfo := createTokenContext(restConfig, token) + + return printKubeConfigTo(contextName, cluster, authInfo, "", w) +} + +func createTokenContext(restConfig *rest.Config, token string) (string, *api.Cluster, *api.AuthInfo) { + contextName := "default" + + cluster := api.NewCluster() + cluster.Server = restConfig.Host + cluster.InsecureSkipTLSVerify = restConfig.Insecure + cluster.CertificateAuthority = restConfig.CAFile + cluster.CertificateAuthorityData = restConfig.CAData + cluster.TLSServerName = restConfig.ServerName + + authInfo := api.NewAuthInfo() + authInfo.Token = token + + return contextName, cluster, authInfo +} + +func createContext(options ContextOptions) (string, *api.Cluster, *api.AuthInfo, error) { + contextName := options.Name + cluster := api.NewCluster() + cluster.Server = options.Server + cluster.CertificateAuthorityData = options.CaData + cluster.InsecureSkipTLSVerify = options.InsecureSkipTLSVerify + + authInfo := api.NewAuthInfo() + if options.Token != "" || options.ClientCertificateData != nil || options.ClientKeyData != nil { + authInfo.Token = options.Token + authInfo.ClientKeyData = options.ClientKeyData + authInfo.ClientCertificateData = options.ClientCertificateData + } else { + command, err := os.Executable() + if err != nil { + return "", nil, nil, err + } + + absConfigPath, err := filepath.Abs(options.ConfigPath) + if err != nil { + return "", nil, nil, err + } + + if options.VirtualClusterAccessPointEnabled { + projectName, virtualClusterName := virtualClusterInstanceProjectAndNameFromContextName(contextName) + authInfo.Exec = &api.ExecConfig{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Command: command, + Args: []string{"token", "--silent", "--project", projectName, "--virtual-cluster", virtualClusterName}, + } + } else { + authInfo.Exec = &api.ExecConfig{ + APIVersion: v1beta1.SchemeGroupVersion.String(), + Command: command, + Args: []string{"token", "--silent", "--config", absConfigPath}, + } + if options.DirectClusterEndpointEnabled { + authInfo.Exec.Args = append(authInfo.Exec.Args, "--direct-cluster-endpoint") + } + } + } + + return contextName, cluster, authInfo, nil +} diff --git a/pkg/platform/loftutils/positional_args.go b/pkg/platform/loftutils/positional_args.go new file mode 100644 index 000000000..08f9f23fa --- /dev/null +++ b/pkg/platform/loftutils/positional_args.go @@ -0,0 +1,68 @@ +package util + +import ( + "fmt" +
"strings" + + "github.com/spf13/cobra" +) + +var ( + SpaceNameOnlyUseLine string + SpaceNameOnlyValidator cobra.PositionalArgs + + VClusterNameOnlyUseLine string + + VClusterNameOnlyValidator cobra.PositionalArgs +) + +func init() { + SpaceNameOnlyUseLine, SpaceNameOnlyValidator = NamedPositionalArgsValidator(true, true, "SPACE_NAME") + VClusterNameOnlyUseLine, VClusterNameOnlyValidator = NamedPositionalArgsValidator(true, true, "VCLUSTER_NAME") +} + +// NamedPositionalArgsValidator returns a cobra.PositionalArgs that returns a helpful +// error message if the arg number doesn't match. +// It also returns a string that can be appended to the cobra useline +// +// Example output for extra arguments with : +// +// $ command arg asdf +// [fatal] command ARG_1 [flags] +// Invalid Args: received 2 arguments, expected 1, extra arguments: "asdf" +// Run with --help for more details +// +// Example output for missing arguments: +// +// $ command +// [fatal] command ARG_1 [flags] +// Invalid Args: received 0 arguments, expected 1, please specify missing: "ARG_!" +// Run with --help for more details on arguments +func NamedPositionalArgsValidator(failMissing, failExtra bool, expectedArgs ...string) (string, cobra.PositionalArgs) { + return " " + strings.Join(expectedArgs, " "), func(cmd *cobra.Command, args []string) error { + numExpectedArgs := len(expectedArgs) + numArgs := len(args) + numMissing := numExpectedArgs - numArgs + + if numMissing == 0 { + return nil + } + + // didn't receive as many arguments as expected + if numMissing > 0 && failMissing { + // the last numMissing expectedArgs + missingKeys := strings.Join(expectedArgs[len(expectedArgs)-(numMissing):], ", ") + return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, please specify missing: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, missingKeys) + } + + // received more than expected + if numMissing < 0 && failExtra { + // received more than expected + numExtra := -numMissing + // the last numExtra args + extraValues := strings.Join(args[len(args)-numExtra:], ", ") + return fmt.Errorf("%s\nInvalid Args: received %d arguments, expected %d, extra arguments: %q\nRun with --help for more details on arguments", cmd.UseLine(), numArgs, numExpectedArgs, extraValues) + } + + return nil + } +} diff --git a/pkg/platform/loftutils/positional_args_test.go b/pkg/platform/loftutils/positional_args_test.go new file mode 100644 index 000000000..ac45cb4d1 --- /dev/null +++ b/pkg/platform/loftutils/positional_args_test.go @@ -0,0 +1,55 @@ +package util + +import ( + "fmt" + "testing" + + "github.com/spf13/cobra" + "gotest.tools/v3/assert" +) + +func TestNamedPositionalArgsValidator(t *testing.T) { + // loop through a generated variety of inputs: arg counts, expected arg counts, and failMissing + // since it depends on the numbers, it's easier to loop than writing a testable + maxExpectedArgCount := 5 + maxActualArgsCount := maxExpectedArgCount + 5 + expectedArgs := []string{} + testNum := 0 + // loop through maxExpectedArgCount lengths of expectedArgs + for len(expectedArgs) <= maxExpectedArgCount { + actualArgs := []string{} + // loop through maxActualArgCount lengths of actualArgs + for len(actualArgs) <= maxActualArgsCount { + defer func() { + panicErr := recover() + if panicErr != nil { + t.Fatalf("this function should never panic: %+v", panicErr) + } + }() + testNum++ + // loop through both values of failMissing + for _, failMissing := range []bool{true, false} { + for _, failExtra 
:= range []bool{true, false} { + // execute test + t.Logf("running test #%d with failMissing %v, failExtra %v, expectedArgs: %q, args: %q", testNum, failMissing, failExtra, expectedArgs, actualArgs) + // if testNum == 23 { + // t.Log("focus a test number for debugging") + // } + _, validator := NamedPositionalArgsValidator(failMissing, failExtra, expectedArgs...) + err := validator(&cobra.Command{}, actualArgs) + if len(actualArgs) > len(expectedArgs) && failExtra { + assert.ErrorContains(t, err, "extra arguments:", "expect error to not be nil as arg count is mismatched") + } else if len(actualArgs) < len(expectedArgs) && failMissing { + assert.ErrorContains(t, err, "please specify missing:", "expect error to not be nil as arg count is mismatched") + } else { + assert.NilError(t, err, "expect error to be nil as all args provided and no extra") + } + // append to actual args + actualArgs = append(actualArgs, fmt.Sprintf("ARG_%d", len(actualArgs))) + } + } + } + // append to expected args + expectedArgs = append(expectedArgs, fmt.Sprintf("ARG_NAME_%d", len(expectedArgs))) + } +} diff --git a/pkg/platform/loftutils/util.go b/pkg/platform/loftutils/util.go new file mode 100644 index 000000000..bda639922 --- /dev/null +++ b/pkg/platform/loftutils/util.go @@ -0,0 +1,28 @@ +package util + +import ( + "errors" + + kerrors "k8s.io/apimachinery/pkg/api/errors" +) + +// GetCause returns the first status cause of a wrapped Kubernetes StatusError, +// falling back to the plain error message for everything else. +func GetCause(err error) string { + if err == nil { + return "" + } + + var statusErr *kerrors.StatusError + + if errors.As(err, &statusErr) { + details := statusErr.Status().Details + if details != nil && len(details.Causes) > 0 { + return details.Causes[0].Message + } + + return statusErr.Error() + } + + return err.Error() +}
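A usage sketch for GetCause, assuming the import path pkg/platform/loftutils shown in this diff under the vcluster module (the package name is util): because GetCause uses errors.As, it also unwraps StatusErrors buried inside wrapped errors.

package main

import (
    "errors"
    "fmt"

    util "github.com/loft-sh/vcluster/pkg/platform/loftutils"
    kerrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // a StatusError carrying structured causes, as the API server returns them
    statusErr := &kerrors.StatusError{ErrStatus: metav1.Status{
        Message: "admission failure",
        Details: &metav1.StatusDetails{
            Causes: []metav1.StatusCause{{Message: "spec.replicas must be positive"}},
        },
    }}

    // wrapping is fine, errors.As inside GetCause walks the chain
    wrapped := fmt.Errorf("create space: %w", statusErr)

    fmt.Println(util.GetCause(wrapped))            // spec.replicas must be positive
    fmt.Println(util.GetCause(errors.New("boom"))) // boom
    fmt.Println(util.GetCause(nil) == "")          // true
}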