diff --git a/pkg/kapp/cmd/app/delete.go b/pkg/kapp/cmd/app/delete.go
index 97613d62e..7a8b292d9 100644
--- a/pkg/kapp/cmd/app/delete.go
+++ b/pkg/kapp/cmd/app/delete.go
@@ -179,7 +179,13 @@ func (o *DeleteOptions) existingResources(app ctlapp.App,
 		return nil, false, err
 	}
 
-	existingResources, err := supportObjs.IdentifiedResources.List(labelSelector, nil, ctlres.IdentifiedResourcesListOpts{})
+	meta, err := app.Meta()
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingResources, err := supportObjs.IdentifiedResources.List(labelSelector, nil, ctlres.IdentifiedResourcesListOpts{
+		ResourceNamespaces: meta.LastChange.Namespaces})
 	if err != nil {
 		return nil, false, err
 	}
diff --git a/pkg/kapp/cmd/app/deploy.go b/pkg/kapp/cmd/app/deploy.go
index ad98e58b6..25cfb29ad 100644
--- a/pkg/kapp/cmd/app/deploy.go
+++ b/pkg/kapp/cmd/app/deploy.go
@@ -152,8 +152,13 @@ func (o *DeployOptions) Run() error {
 		return err
 	}
 
+	meta, err := app.Meta()
+	if err != nil {
+		return err
+	}
+
 	existingResources, existingPodRs, err := o.existingResources(
-		newResources, labeledResources, resourceFilter, supportObjs.Apps, usedGKs)
+		newResources, labeledResources, resourceFilter, supportObjs.Apps, usedGKs, append(meta.LastChange.Namespaces, nsNames...))
 	if err != nil {
 		return err
 	}
@@ -204,7 +209,7 @@ func (o *DeployOptions) Run() error {
 	if o.DeployFlags.Logs {
 		cancelLogsCh := make(chan struct{})
 		defer func() { close(cancelLogsCh) }()
-		go o.showLogs(supportObjs.CoreClient, supportObjs.IdentifiedResources, existingPodRs, labelSelector, cancelLogsCh)
+		go o.showLogs(supportObjs.CoreClient, supportObjs.IdentifiedResources, existingPodRs, labelSelector, cancelLogsCh, append(meta.LastChange.Namespaces, nsNames...))
 	}
 
 	defer func() {
@@ -347,7 +352,7 @@ func (o *DeployOptions) newResourcesFromFiles() ([]ctlres.Resource, error) {
 
 func (o *DeployOptions) existingResources(newResources []ctlres.Resource,
 	labeledResources *ctlres.LabeledResources, resourceFilter ctlres.ResourceFilter,
-	apps ctlapp.Apps, usedGKs []schema.GroupKind) ([]ctlres.Resource, []ctlres.Resource, error) {
+	apps ctlapp.Apps, usedGKs []schema.GroupKind, resourceNamespaces []string) ([]ctlres.Resource, []ctlres.Resource, error) {
 
 	labelErrorResolutionFunc := func(key string, val string) string {
 		items, _ := apps.List(nil)
@@ -371,7 +376,8 @@ func (o *DeployOptions) existingResources(newResources []ctlres.Resource,
 
 		//Scope resource searching to UsedGKs
 		IdentifiedResourcesListOpts: ctlres.IdentifiedResourcesListOpts{
-			GKsScope: usedGKs,
+			GKsScope:           usedGKs,
+			ResourceNamespaces: resourceNamespaces,
 		},
 	}
 
@@ -486,7 +492,7 @@ const (
 
 func (o *DeployOptions) showLogs(
 	coreClient kubernetes.Interface, identifiedResources ctlres.IdentifiedResources,
-	existingPodRs []ctlres.Resource, labelSelector labels.Selector, cancelCh chan struct{}) {
+	existingPodRs []ctlres.Resource, labelSelector labels.Selector, cancelCh chan struct{}, resourceNamespaces []string) {
 
 	existingPodsByUID := map[string]struct{}{}
 
@@ -520,7 +526,7 @@ func (o *DeployOptions) showLogs(
 
 	podWatcher := ctlres.FilteringPodWatcher{
 		podMatcherFunc,
-		identifiedResources.PodResources(labelSelector),
+		identifiedResources.PodResources(labelSelector, resourceNamespaces),
 	}
 
 	contFilterFunc := func(pod corev1.Pod) []string {
diff --git a/pkg/kapp/cmd/app/inspect.go b/pkg/kapp/cmd/app/inspect.go
index 044b36dd9..2c7cb3bcd 100644
--- a/pkg/kapp/cmd/app/inspect.go
+++ b/pkg/kapp/cmd/app/inspect.go
@@ -75,7 +75,13 @@ func (o *InspectOptions) Run() error {
 		return err
 	}
 
-	resources, err := supportObjs.IdentifiedResources.List(labelSelector, nil, resources.IdentifiedResourcesListOpts{})
+	meta, err := app.Meta()
+	if err != nil {
+		return err
+	}
+
+	resources, err := supportObjs.IdentifiedResources.List(labelSelector, nil, resources.IdentifiedResourcesListOpts{
+		ResourceNamespaces: meta.LastChange.Namespaces})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/kapp/cmd/app/logs.go b/pkg/kapp/cmd/app/logs.go
index 74ea77f41..69f63876c 100644
--- a/pkg/kapp/cmd/app/logs.go
+++ b/pkg/kapp/cmd/app/logs.go
@@ -71,7 +71,7 @@ func (o *LogsOptions) Run() error {
 			}
 			return true
 		},
-		supportObjs.IdentifiedResources.PodResources(labelSelector),
+		supportObjs.IdentifiedResources.PodResources(labelSelector, nil),
 	}
 
 	contFilter := func(pod corev1.Pod) []string {
diff --git a/pkg/kapp/resources/identified_resources_list.go b/pkg/kapp/resources/identified_resources_list.go
index 9c71cdb3e..21bb98056 100644
--- a/pkg/kapp/resources/identified_resources_list.go
+++ b/pkg/kapp/resources/identified_resources_list.go
@@ -14,6 +14,7 @@ import (
 type IdentifiedResourcesListOpts struct {
 	IgnoreCachedResTypes bool
 	GKsScope             []schema.GroupKind
+	ResourceNamespaces   []string
 }
 
 func (r IdentifiedResources) List(labelSelector labels.Selector, resRefs []ResourceRef, opts IdentifiedResourcesListOpts) ([]Resource, error) {
@@ -49,6 +50,7 @@ func (r IdentifiedResources) List(labelSelector labels.Selector, resRefs []Resou
 		ListOpts: &metav1.ListOptions{
 			LabelSelector: labelSelector.String(),
 		},
+		ResourceNamespaces: opts.ResourceNamespaces,
 	}
 
 	resources, err := r.resources.All(resTypes, allOpts)
diff --git a/pkg/kapp/resources/identified_resources_pods.go b/pkg/kapp/resources/identified_resources_pods.go
index 71c01f0f0..e3e8352bf 100644
--- a/pkg/kapp/resources/identified_resources_pods.go
+++ b/pkg/kapp/resources/identified_resources_pods.go
@@ -5,6 +5,8 @@ package resources
 
 import (
 	"fmt"
+	"strings"
+
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -12,8 +14,8 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func (r IdentifiedResources) PodResources(labelSelector labels.Selector) UniquePodWatcher {
-	return UniquePodWatcher{labelSelector, r.fallbackAllowedNamespaces, r.coreClient}
+func (r IdentifiedResources) PodResources(labelSelector labels.Selector, resourceNamespaces []string) UniquePodWatcher {
+	return UniquePodWatcher{labelSelector, uniqAndValidNamespaces(append(r.fallbackAllowedNamespaces, resourceNamespaces...)), r.coreClient}
 }
 
 type PodWatcherI interface {
@@ -34,32 +36,34 @@ func (w UniquePodWatcher) Watch(podsToWatchCh chan corev1.Pod, cancelCh chan str
 	go func() {
 		// Watch Pods in all namespaces first and fallback to the
 		// fallbackAllowedNamespaces if lack of permission
-		namespace := ""
-		for {
+		namespaces := []string{""}
+		namespaces = append(namespaces, w.fallbackAllowedNamespaces...)
+		var forbiddenNamespaces []string
+
+		for _, namespace := range namespaces {
 			podWatcher := NewPodWatcher(
 				w.coreClient.CoreV1().Pods(namespace),
 				metav1.ListOptions{LabelSelector: w.labelSelector.String()},
 			)
-
 			err := podWatcher.Watch(nonUniquePodsToWatchCh, cancelCh)
 			if err == nil {
-				break
-			}
-			if errors.IsForbidden(err) && namespace == "" {
-				// The '-n' flag or default state namespace can specify only 1 namespace, so there
-				// should be at most 1 item in fallbackAllowedNamespaces
-				if len(w.fallbackAllowedNamespaces) > 0 {
-					namespace = w.fallbackAllowedNamespaces[0]
-					if namespace == "" {
-						break
-					}
+				if namespace == "" {
+					break
 				}
-			} else {
+				continue
+			}
+			if !errors.IsForbidden(err) {
 				fmt.Printf("Pod watching error: %s\n", err) // TODO
 				break
 			}
+			if namespace != "" {
+				forbiddenNamespaces = append(forbiddenNamespaces, fmt.Sprintf(`"%s"`, namespace))
+			}
 		}
+		if len(forbiddenNamespaces) > 0 {
+			fmt.Printf(`Pod watching error: pods is forbidden: User cannot list resource "pods" in API group "" in the namespace(s) %s`, strings.Join(forbiddenNamespaces, ", "))
+		}
 		close(nonUniquePodsToWatchCh)
 	}()
diff --git a/pkg/kapp/resources/resources.go b/pkg/kapp/resources/resources.go
index 9006a9171..39a42c9c1 100644
--- a/pkg/kapp/resources/resources.go
+++ b/pkg/kapp/resources/resources.go
@@ -96,15 +96,8 @@ func (c *ResourcesImpl) All(resTypes []ResourceType, opts AllOpts) ([]Resource,
 		opts.ListOpts = &metav1.ListOptions{}
 	}
 
-	nsScope := "" // all namespaces by default
-	nsScopeLimited := c.opts.ScopeToFallbackAllowedNamespaces && len(c.opts.FallbackAllowedNamespaces) == 1
-
-	// Eagerly use single fallback namespace to avoid making all-namespaces request
-	// just to see it fail, and fallback to making namespace-scoped request
-	if nsScopeLimited {
-		nsScope = c.opts.FallbackAllowedNamespaces[0]
-		c.logger.Info("Scoping listings to single namespace: %s", nsScope)
-	}
+	// Populate FallbackAllowedNamespace with resource namespaces stored during deploy
+	c.opts.FallbackAllowedNamespaces = uniqAndValidNamespaces(append(c.opts.FallbackAllowedNamespaces, opts.ResourceNamespaces...))
 
 	unstructItemsCh := make(chan unstructItems, len(resTypes))
 	fatalErrsCh := make(chan error, len(resTypes))
@@ -124,16 +117,23 @@ func (c *ResourcesImpl) All(resTypes []ResourceType, opts AllOpts) ([]Resource,
 
 			client := c.mutedDynamicClient.Resource(resType.GroupVersionResource)
 
-			err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error {
-				if resType.Namespaced() {
-					list, err = client.Namespace(nsScope).List(context.TODO(), *opts.ListOpts)
-				} else {
-					list, err = client.List(context.TODO(), *opts.ListOpts)
+			// If resource is cluster scoped or request is not scoped to fallback
+			// allowed namespaces manually, then scope list to all namespaces
+			if !c.opts.ScopeToFallbackAllowedNamespaces || !resType.Namespaced() {
+				err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error {
+					if resType.Namespaced() {
+						list, err = client.Namespace("").List(context.TODO(), *opts.ListOpts)
+					} else {
+						list, err = client.List(context.TODO(), *opts.ListOpts)
+					}
+					return err
+				})
+
+				if err == nil {
+					unstructItemsCh <- unstructItems{resType, list.Items}
+					return
 				}
-				return err
-			})
 
-			if err != nil {
 				if !errors.IsForbidden(err) {
 					// Ignore certain GVs due to failing API backing
 					if c.resourceTypes.CanIgnoreFailingGroupVersion(resType.GroupVersion()) {
@@ -143,27 +143,24 @@ func (c *ResourcesImpl) All(resTypes []ResourceType, opts AllOpts) ([]Resource,
 					}
 					return
 				}
-				// At this point err==Forbidden...
-				// In case ns scope is limited already, we will not gain anything
-				// by trying to run namespace scoped lists for allowed namespaced
-				// (ie since it's would be same request that just failed)
-				if !resType.Namespaced() || nsScopeLimited {
+				if !resType.Namespaced() {
 					c.logger.Debug("Skipping forbidden group version: %#v", resType.GroupVersionResource)
 					return
 				}
+			}
 
-			// TODO improve perf somehow
-			list, err = c.allForNamespaces(client, opts.ListOpts)
-			if err != nil {
-				// Ignore certain GVs due to failing API backing
-				if c.resourceTypes.CanIgnoreFailingGroupVersion(resType.GroupVersion()) {
-					c.logger.Info("Ignoring group version: %#v", resType.GroupVersionResource)
-				} else {
-					fatalErrsCh <- fmt.Errorf("Listing %#v, namespaced: %t: %w", resType.GroupVersionResource, resType.Namespaced(), err)
-				}
-				return
+			// At this point err==Forbidden...
+			// or requests are scoped to fallback allowed namespaces manually
+			list, err = c.allForNamespaces(client, opts.ListOpts)
+			if err != nil {
+				// Ignore certain GVs due to failing API backing
+				if c.resourceTypes.CanIgnoreFailingGroupVersion(resType.GroupVersion()) {
+					c.logger.Info("Ignoring group version: %#v", resType.GroupVersionResource)
+				} else {
+					fatalErrsCh <- fmt.Errorf("Listing %#v, namespaced: %t: %w", resType.GroupVersionResource, resType.Namespaced(), err)
 				}
+				return
 			}
 
 			unstructItemsCh <- unstructItems{resType, list.Items}
@@ -207,8 +204,13 @@ func (c *ResourcesImpl) allForNamespaces(client dynamic.NamespaceableResourceInt
 
 		go func() {
 			defer itemsDone.Done()
+			var resList *unstructured.UnstructuredList
+			var err error
 
-			resList, err := client.Namespace(ns).List(context.TODO(), *listOpts)
+			err = util.Retry2(time.Second, 5*time.Second, c.isServerRescaleErr, func() error {
+				resList, err = client.Namespace(ns).List(context.TODO(), *listOpts)
+				return err
+			})
 			if err != nil {
 				if !errors.IsForbidden(err) {
 					fatalErrsCh <- err
@@ -538,6 +540,10 @@ func (c *ResourcesImpl) assumedAllowedNamespaces() ([]string, error) {
 		return *c.assumedAllowedNamespacesMemo, nil
 	}
 
+	if c.opts.ScopeToFallbackAllowedNamespaces {
+		return c.opts.FallbackAllowedNamespaces, nil
+	}
+
 	nsList, err := c.coreClient.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
 	if err != nil {
 		if errors.IsForbidden(err) {
@@ -593,8 +599,23 @@ func (c *ResourcesImpl) isEtcdRetryableError(err error) bool {
 	return etcdserverRetryableErrCheck.MatchString(err.Error())
 }
 
+func uniqAndValidNamespaces(in []string) []string {
+	var out []string
+	if len(in) > 0 {
+		uniqNamespaces := map[string]struct{}{}
+		for _, ns := range in {
+			if _, exists := uniqNamespaces[ns]; !exists && ns != "(cluster)" {
+				out = append(out, ns)
+				uniqNamespaces[ns] = struct{}{}
+			}
+		}
+	}
+	return out
+}
+
 type AllOpts struct {
-	ListOpts *metav1.ListOptions
+	ListOpts           *metav1.ListOptions
+	ResourceNamespaces []string
 }
 
 type resourceStatusErr struct {
diff --git a/test/e2e/cluster_resource.go b/test/e2e/cluster_resource.go
index 063304636..15cac63a0 100644
--- a/test/e2e/cluster_resource.go
+++ b/test/e2e/cluster_resource.go
@@ -36,7 +36,7 @@ func NewPresentClusterResource(kind, name, ns string, kubectl Kubectl) ClusterRe
 		args = append(args, "--show-managed-fields")
 	}
 
-	out, _ := kubectl.RunWithOpts(args, RunOpts{})
+	out, _ := kubectl.RunWithOpts(args, RunOpts{NoNamespace: true})
 
 	return ClusterResource{ctlres.MustNewResourceFromBytes([]byte(out))}
 }
@@ -60,7 +60,7 @@ func RemoveClusterResource(t *testing.T, kind, name, ns string, kubectl Kubectl)
 }
 
 func PatchClusterResource(kind, name, ns, patch string, kubectl Kubectl) {
-	kubectl.Run([]string{"patch", kind, name, "--type=json", "--patch", patch, "-n", ns})
+	kubectl.RunWithOpts([]string{"patch", kind, name, "--type=json", "--patch", patch, "-n", ns}, RunOpts{NoNamespace: true})
 }
 
 func ClusterResourceExists(kind, name string, kubectl Kubectl) (bool, error) {
diff --git a/test/e2e/fallback_allowed_ns_test.go b/test/e2e/fallback_allowed_ns_test.go
new file mode 100644
index 000000000..4ed080e69
--- /dev/null
+++ b/test/e2e/fallback_allowed_ns_test.go
@@ -0,0 +1,358 @@
+// Copyright 2022 VMware, Inc.
+// SPDX-License-Identifier: Apache-2.0
+
+package e2e
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"testing"
+
+	uitest "github.com/cppforlife/go-cli-ui/ui/test"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFallbackAllowedNamespaces(t *testing.T) {
+	env := BuildEnv(t)
+	logger := Logger{}
+	kapp := Kapp{t, env.Namespace, env.KappBinaryPath, logger}
+	kubectl := Kubectl{t, env.Namespace, logger}
+
+	testNamespace := "test-fallback-allowed-namespace"
+	testNamespace2 := "test-fallback-allowed-namespace-2"
+
+	rbac := `
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: __test-ns__
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: scoped-sa
+  namespace: __ns__
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: scoped-sa
+  namespace: __ns__
+  annotations:
+    kubernetes.io/service-account.name: scoped-sa
+type: kubernetes.io/service-account-token
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scoped-role
+  namespace: __ns__
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["*"]
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scoped-role
+  namespace: __test-ns__
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: ["*"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scoped-role-binding
+  namespace: __ns__
+subjects:
+- kind: ServiceAccount
+  name: scoped-sa
+  namespace: __ns__
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: scoped-role
+  namespace: __ns__
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: scoped-role-binding
+  namespace: __test-ns__
+subjects:
+- kind: ServiceAccount
+  name: scoped-sa
+  namespace: __ns__
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: scoped-role
+  namespace: __ns__
+`
+
+	rbac = strings.ReplaceAll(rbac, "__ns__", env.Namespace)
+	rbac = strings.ReplaceAll(rbac, "__test-ns__", testNamespace)
+
+	rbacName := "test-e2e-rbac-app"
+	scopedContext := "scoped-context"
+	scopedUser := "scoped-user"
+	appName := "test-fallback-allowed-namespace"
+
+	cleanUp := func() {
+		kapp.Run([]string{"delete", "-a", rbacName})
+		kapp.Run([]string{"delete", "-a", appName})
+		RemoveClusterResource(t, "ns", testNamespace2, "", kubectl)
+	}
+	cleanUp()
+	defer cleanUp()
+
+	kapp.RunWithOpts([]string{"deploy", "-a", rbacName, "-f", "-"}, RunOpts{StdinReader: strings.NewReader(rbac)})
+
+	cleanUpContext := ScopedContext(t, kubectl, "scoped-sa", scopedContext, scopedUser)
+	defer cleanUpContext()
+
+	yaml1 := fmt.Sprintf(`
+apiVersion: "v1"
+kind: ConfigMap
+metadata:
+  name: cm-1
+  namespace: %s
+data:
+  foo: bar
+---
+apiVersion: "v1"
+kind: ConfigMap
+metadata:
+  name: cm-2
+  namespace: %s
+data:
+  foo: bar
+---
+apiVersion: "v1"
+kind: ConfigMap
+metadata:
+  name: cm-3
+  namespace: %s
+data:
+  foo: bar
+`, env.Namespace, testNamespace, testNamespace)
+
+	yaml2 := fmt.Sprintf(`
+apiVersion: "v1"
+kind: ConfigMap
+metadata:
+  name: cm-1
+  namespace: %s
+data:
+  foo: bar
+---
+apiVersion: "v1"
+kind: ConfigMap
+metadata:
+  name: cm-2
+  namespace: %s
+data:
+  foo: bar
+`, env.Namespace, testNamespace)
+
+	logger.Section("deploy app using scoped context", func() {
+		out, _ := kapp.RunWithOpts([]string{"deploy", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)},
+			RunOpts{StdinReader: strings.NewReader(yaml1)})
+
+		// Expect pod watching error for the fallback allowed namespaces as listing pods is not allowed.
+		require.Contains(t, out, fmt.Sprintf(`Pod watching error: pods is forbidden: User cannot list resource "pods" in API group "" in the namespace(s) "%s", "%s"`,
+			env.Namespace, testNamespace))
+
+		NewPresentClusterResource("configmap", "cm-1", env.Namespace, kubectl)
+		NewPresentClusterResource("configmap", "cm-2", testNamespace, kubectl)
+		NewPresentClusterResource("configmap", "cm-3", testNamespace, kubectl)
+	})
+
+	logger.Section("inspect app using scoped context", func() {
+		out := kapp.Run([]string{"inspect", "-a", appName, "--json", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)})
+
+		expectedResources := []map[string]string{{
+			"age":             "",
+			"kind":            "ConfigMap",
+			"name":            "cm-1",
+			"namespace":       env.Namespace,
+			"owner":           "kapp",
+			"reconcile_info":  "",
+			"reconcile_state": "ok",
+		}, {
+			"age":             "",
+			"kind":            "ConfigMap",
+			"name":            "cm-2",
+			"namespace":       testNamespace,
+			"owner":           "kapp",
+			"reconcile_info":  "",
+			"reconcile_state": "ok",
+		}, {
+			"age":             "",
+			"kind":            "ConfigMap",
+			"name":            "cm-3",
+			"namespace":       testNamespace,
+			"owner":           "kapp",
+			"reconcile_info":  "",
+			"reconcile_state": "ok",
+		}}
+
+		resp := uitest.JSONUIFromBytes(t, []byte(out))
+
+		require.Equalf(t, expectedResources, replaceAge((resp.Tables[0].Rows)), "Expected resources to match")
+	})
+
+	logger.Section("delete one configmap and deploy again using scoped context", func() {
+		kapp.RunWithOpts([]string{"deploy", "-a", appName, "-f", "-", fmt.Sprintf("--kubeconfig-context=%s", scopedContext)},
+			RunOpts{StdinReader: strings.NewReader(yaml2)})
+
+		NewPresentClusterResource("configmap", "cm-1", env.Namespace, kubectl)
+		NewPresentClusterResource("configmap", "cm-2", testNamespace, kubectl)
+		NewMissingClusterResource(t, "configmap", "cm-3", testNamespace, kubectl)
+	})
+
+	logger.Section("delete app", func() {
+		kapp.Run([]string{"delete", "-a", appName, fmt.Sprintf("--kubeconfig-context=%s", scopedContext)})
+
+		NewMissingClusterResource(t, "configmap", "cm-1", env.Namespace, kubectl)
+		NewMissingClusterResource(t, "configmap", "cm-2", testNamespace, kubectl)
+		NewMissingClusterResource(t, "configmap", "cm-3", testNamespace, kubectl)
+	})
+
+	logger.Section("deploy app with admin permission but scope-to-fallback-allowed-namespaces", func() {
+		kapp.RunWithOpts([]string{"deploy", "-a", appName, "-f", "-", "--dangerous-scope-to-fallback-allowed-namespaces"},
+			RunOpts{StdinReader: strings.NewReader(yaml1)})
+
+		NewPresentClusterResource("configmap", "cm-1", env.Namespace, kubectl)
+		NewPresentClusterResource("configmap", "cm-2", testNamespace, kubectl)
+		NewPresentClusterResource("configmap", "cm-3", testNamespace, kubectl)
+	})
+
+	logger.Section("inspect app without scope-to-fallback-allowed-namespaces", func() {
+		const appLabelKey string = "kapp.k14s.io/app"
+		NewClusterResource(t, "ns", testNamespace2, "", kubectl)
+		NewClusterResource(t, "cm", "cm-4", testNamespace2, kubectl)
+		labels := NewPresentClusterResource("cm", "cm-2", testNamespace, kubectl).Labels()
"cm-2", testNamespace, kubectl).Labels() + appLabel := labels[appLabelKey] + + patch := fmt.Sprintf(`[{ "op": "add", "path": "/metadata/labels", "value": {%s: "%s"}}]`, appLabelKey, appLabel) + PatchClusterResource("cm", "cm-4", testNamespace2, patch, kubectl) + + out := kapp.Run([]string{"inspect", "-a", appName, "--json"}) + + // Should get the newly added configmap + expectedResources := []map[string]string{{ + "age": "", + "kind": "ConfigMap", + "name": "cm-1", + "namespace": env.Namespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }, { + "age": "", + "kind": "ConfigMap", + "name": "cm-2", + "namespace": testNamespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }, { + "age": "", + "kind": "ConfigMap", + "name": "cm-3", + "namespace": testNamespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }, { + "age": "", + "kind": "ConfigMap", + "name": "cm-4", + "namespace": testNamespace2, + "owner": "cluster", + "reconcile_info": "", + "reconcile_state": "ok", + }} + + resp := uitest.JSONUIFromBytes(t, []byte(out)) + + require.Equalf(t, expectedResources, replaceAge((resp.Tables[0].Rows)), "Expected resources to match") + }) + + logger.Section("inspect app with scope-to-fallback-allowed-namespaces", func() { + out := kapp.Run([]string{"inspect", "-a", appName, "--json", "--dangerous-scope-to-fallback-allowed-namespaces"}) + + // Shouldn't get the newly added configmap + expectedResources := []map[string]string{{ + "age": "", + "kind": "ConfigMap", + "name": "cm-1", + "namespace": env.Namespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }, { + "age": "", + "kind": "ConfigMap", + "name": "cm-2", + "namespace": testNamespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }, { + "age": "", + "kind": "ConfigMap", + "name": "cm-3", + "namespace": testNamespace, + "owner": "kapp", + "reconcile_info": "", + "reconcile_state": "ok", + }} + + resp := uitest.JSONUIFromBytes(t, []byte(out)) + + require.Equalf(t, expectedResources, replaceAge((resp.Tables[0].Rows)), "Expected resources to match") + }) + + logger.Section("delete one configmap and deploy again with scope-to-fallback-allowed-namespaces", func() { + kapp.RunWithOpts([]string{"deploy", "-a", appName, "-f", "-", "--dangerous-scope-to-fallback-allowed-namespaces"}, + RunOpts{StdinReader: strings.NewReader(yaml2)}) + + NewPresentClusterResource("configmap", "cm-1", env.Namespace, kubectl) + NewPresentClusterResource("configmap", "cm-2", testNamespace, kubectl) + NewMissingClusterResource(t, "configmap", "cm-3", testNamespace, kubectl) + }) + + logger.Section("delete app", func() { + kapp.Run([]string{"delete", "-a", appName, "--dangerous-scope-to-fallback-allowed-namespaces"}) + + NewMissingClusterResource(t, "configmap", "cm-1", env.Namespace, kubectl) + NewMissingClusterResource(t, "configmap", "cm-2", testNamespace, kubectl) + NewMissingClusterResource(t, "configmap", "cm-3", testNamespace, kubectl) + }) +} + +func ScopedContext(t *testing.T, kubectl Kubectl, serviceAccountName, contextName, userName string) func() { + token := kubectl.Run([]string{"get", "secret", "scoped-sa", "-o", "jsonpath={.data.token}"}) + + tokenDecoded, err := base64.StdEncoding.DecodeString(token) + require.NoError(t, err) + + currentContextCluster := kubectl.Run([]string{"config", "view", "--minify", "-o", "jsonpath={.clusters[].name}"}) + + kubectl.RunWithOpts([]string{"config", "set-credentials", userName, 
fmt.Sprintf("--token=%s", string(tokenDecoded))}, + RunOpts{NoNamespace: true, Redact: true}) + + kubectl.RunWithOpts([]string{"config", "set-context", contextName, fmt.Sprintf("--user=%s", userName), fmt.Sprintf("--cluster=%s", currentContextCluster)}, + RunOpts{NoNamespace: true}) + + return func() { + kubectl.Run([]string{"config", "delete-context", contextName}) + kubectl.Run([]string{"config", "delete-user", userName}) + } +} diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 9bb992596..6e86067f4 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -30,7 +30,7 @@ func (k Kubectl) RunWithOpts(args []string, opts RunOpts) (string, error) { args = append(args, []string{"-n", k.namespace}...) } - k.l.Debugf("Running '%s'...\n", k.cmdDesc(args)) + k.l.Debugf("Running '%s'...\n", k.cmdDesc(args, opts)) var stderr bytes.Buffer var stdout bytes.Buffer @@ -59,12 +59,16 @@ func (k Kubectl) RunWithOpts(args []string, opts RunOpts) (string, error) { if err != nil { err = fmt.Errorf("Execution error: stderr: '%s' error: '%s'", stderr.String(), err) - require.Truef(k.t, opts.AllowError, "Failed to successfully execute '%s': %v", k.cmdDesc(args), err) + require.Truef(k.t, opts.AllowError, "Failed to successfully execute '%s': %v", k.cmdDesc(args, opts), err) } return stdout.String(), err } -func (k Kubectl) cmdDesc(args []string) string { - return fmt.Sprintf("kubectl %s", strings.Join(args, " ")) +func (k Kubectl) cmdDesc(args []string, opts RunOpts) string { + prefix := "kubectl" + if opts.Redact { + return prefix + " -redacted-" + } + return fmt.Sprintf("%s %s", prefix, strings.Join(args, " ")) } diff --git a/test/e2e/pod_log_test.go b/test/e2e/pod_log_test.go index 20a29e5bf..2811727d6 100644 --- a/test/e2e/pod_log_test.go +++ b/test/e2e/pod_log_test.go @@ -52,9 +52,9 @@ spec: logger.Section("Show logs for new Pods only when annotation value is default", func() { out, _ := kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{IntoNs: true, StdinReader: strings.NewReader(fmt.Sprintf(yaml, 1, ""))}) - NewPresentClusterResource("Pod", "simple-app-0", "default", kubectl) + NewPresentClusterResource("Pod", "simple-app-0", env.Namespace, kubectl) out, _ = kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{IntoNs: true, StdinReader: strings.NewReader(fmt.Sprintf(yaml, 2, ""))}) - NewPresentClusterResource("Pod", "simple-app-1", "default", kubectl) + NewPresentClusterResource("Pod", "simple-app-1", env.Namespace, kubectl) require.NotContains(t, out, "logs | simple-app-0 > demo-container | ", "Should not contain log for the existing Pod") require.Contains(t, out, "logs | simple-app-1 > demo-container | ", "Should contain log for the new Pod") }) @@ -63,9 +63,9 @@ spec: logger.Section("Show logs only for existing Pods with for-existing annotation value", func() { out, _ := kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{IntoNs: true, StdinReader: strings.NewReader(fmt.Sprintf(yaml, 1, "for-existing"))}) - NewPresentClusterResource("Pod", "simple-app-0", "default", kubectl) + NewPresentClusterResource("Pod", "simple-app-0", env.Namespace, kubectl) out, _ = kapp.RunWithOpts([]string{"deploy", "-f", "-", "-a", name}, RunOpts{IntoNs: true, StdinReader: strings.NewReader(fmt.Sprintf(yaml, 2, "for-existing"))}) - NewPresentClusterResource("Pod", "simple-app-1", "default", kubectl) + NewPresentClusterResource("Pod", "simple-app-1", env.Namespace, kubectl) require.Contains(t, out, "logs | simple-app-0 > demo-container | ", "Should contain log 
 		require.NotContains(t, out, "logs | simple-app-1 > demo-container | ", "Should not contain log for the new Pod")
 	})