From 8f9a1fee15eabe61a13bd1481bb418448e18e043 Mon Sep 17 00:00:00 2001
From: Sebastian Widmer
Date: Wed, 4 Sep 2024 12:23:43 +0200
Subject: [PATCH] Implement legacy ResourceQuota and LimitRange generation

Replaces https://hub.syn.tools/appuio-cloud/references/policies/11_generate_quota_limit_range_in_ns.html.

Also includes a webhook to deny edits to the synced resources.
---
 Makefile.vars.mk                              |   2 +-
 config.go                                     |  13 ++
 config.yaml                                   |  67 +++++++++
 config/webhook/manifests.yaml                 |  23 +++
 .../legacy_resource_quota_controller.go       | 140 ++++++++++++++++++
 .../legacy_resource_quota_controller_test.go  |  89 +++++++++++
 go.mod                                        |   2 +-
 main.go                                       |  56 ++++++-
 ...rved_resourcequota_limitrange_validator.go |  62 ++++++++
 ...resourcequota_limitrange_validator_test.go |  64 ++++++++
 whoami/whoami.go                              |  38 +++++
 whoami/whoami_test.go                         |  60 ++++++++
 12 files changed, 613 insertions(+), 3 deletions(-)
 create mode 100644 controllers/legacy_resource_quota_controller.go
 create mode 100644 controllers/legacy_resource_quota_controller_test.go
 create mode 100644 webhooks/reserved_resourcequota_limitrange_validator.go
 create mode 100644 webhooks/reserved_resourcequota_limitrange_validator_test.go
 create mode 100644 whoami/whoami.go
 create mode 100644 whoami/whoami_test.go

diff --git a/Makefile.vars.mk b/Makefile.vars.mk
index 20ea85f..d43ea14 100644
--- a/Makefile.vars.mk
+++ b/Makefile.vars.mk
@@ -16,7 +16,7 @@ CONTAINER_IMG ?= local.dev/$(PROJECT_OWNER)/$(PROJECT_NAME):$(IMG_TAG)

 LOCALBIN ?= $(shell pwd)/bin
 ENVTEST ?= $(LOCALBIN)/setup-envtest
-ENVTEST_K8S_VERSION = 1.26.1
+ENVTEST_K8S_VERSION = 1.28.3


 ## KIND:setup
diff --git a/config.go b/config.go
index 11e4057..b49691f 100644
--- a/config.go
+++ b/config.go
@@ -7,6 +7,7 @@ import (
 	"github.com/appuio/appuio-cloud-agent/limits"
 	"go.uber.org/multierr"
 	"gopkg.in/inf.v0"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"sigs.k8s.io/yaml"
 )
@@ -71,6 +72,18 @@ type Config struct {
 	PodRunOnceActiveDeadlineSecondsOverrideAnnotation string
 	// PodRunOnceActiveDeadlineSecondsDefault is the default activeDeadlineSeconds for RunOnce pods.
 	PodRunOnceActiveDeadlineSecondsDefault int
+
+	// LegacyResourceQuotaAnnotationBase is the annotation base for overriding the default resource quotas.
+	// The actual annotation is `$base/$quotaname.$resource`.
+	LegacyResourceQuotaAnnotationBase string
+	// LegacyDefaultResourceQuotas is a map containing the default resource quotas for each organization.
+	// The keys are the manifest names and the values are the resource quota specs.
+	LegacyDefaultResourceQuotas map[string]corev1.ResourceQuotaSpec
+
+	// LegacyLimitRangeName is the name of the default limit range.
+	LegacyLimitRangeName string
+	// LegacyDefaultLimitRange is the default limit range.
+	LegacyDefaultLimitRange corev1.LimitRangeSpec
 }

 func ConfigFromFile(path string) (c Config, warn []string, err error) {
diff --git a/config.yaml b/config.yaml
index 4166e73..d14f171 100644
--- a/config.yaml
+++ b/config.yaml
@@ -52,3 +52,70 @@ AllowedLabels: [appuio.io/organization]
 PodRunOnceActiveDeadlineSecondsOverrideAnnotation: appuio.io/active-deadline-seconds-override
 # PodRunOnceActiveDeadlineSecondsDefault is the default activeDeadlineSeconds for RunOnce pods.
 PodRunOnceActiveDeadlineSecondsDefault: 1800
+
+# LegacyResourceQuotaAnnotationBase is the annotation base for overriding the default resource quotas.
+# The actual annotation is `$base/$quotaname.$resource`.
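+# For example, with the defaults below, annotating a namespace with
+#   resourcequota.appuio.io/organization-compute.limits.cpu: "8"
+# overrides the `limits.cpu` value of the `organization-compute` quota
+# ("8" is an example value). Slashes in resource names are replaced with
+# underscores, e.g. `count/services` becomes `count_services`.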
+LegacyResourceQuotaAnnotationBase: resourcequota.appuio.io
+# LegacyDefaultResourceQuotas is a map containing the default resource quotas for each organization.
+# The keys are the manifest names and the values are the resource quota specs.
+LegacyDefaultResourceQuotas:
+  # See https://kb.vshn.ch/appuio-cloud/references/quality-requirements/performance/resource-quota.html
+  organization-objects:
+    hard:
+      count/configmaps: "150"
+      count/jobs.batch: "150"
+      count/secrets: "150"
+      count/services: "20"
+      count/services.loadbalancers: "0"
+      count/services.nodeports: "0"
+      count/replicationcontrollers: "100"
+      openshift.io/imagestreams: "20"
+      openshift.io/imagestreamtags: "50"
+
+      requests.storage: 1000Gi
+      persistentvolumeclaims: "10"
+      localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims: "0"
+      requests.ephemeral-storage: "250Mi"
+      limits.ephemeral-storage: "500Mi"
+
+      # Limit the total amount of Rook-Ceph backed storage which can be
+      # requested per namespace
+      cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage: 25Gi
+      rbd-storagepool-cluster.storageclass.storage.k8s.io/requests.storage: 25Gi
+
+  organization-compute:
+    hard:
+      requests.cpu: 4
+      requests.memory: 4Gi
+      limits.cpu: 8
+      limits.memory: 20Gi
+      pods: "45"
+    scopes:
+      - NotTerminating
+
+  organization-compute-terminating:
+    hard:
+      limits.cpu: 4000m
+      limits.memory: 4Gi
+      pods: "5"
+      requests.cpu: 500m
+      requests.memory: 2Gi
+    scopes:
+      - Terminating
+
+# LegacyLimitRangeName is the name of the default limit range.
+LegacyLimitRangeName: organization
+# LegacyDefaultLimitRange is the default limit range.
+LegacyDefaultLimitRange:
+  limits:
+    - type: Container
+      min:
+        cpu: "10m"
+        memory: "4Mi"
+        ephemeral-storage: "100Ki"
+      default:
+        cpu: "600m"
+        memory: "768Mi"
+      defaultRequest:
+        cpu: "10m"
+        memory: "100Mi"
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
index c84b606..2c7f8bc 100644
--- a/config/webhook/manifests.yaml
+++ b/config/webhook/manifests.yaml
@@ -94,6 +94,29 @@ kind: ValidatingWebhookConfiguration
 metadata:
   name: validating-webhook-configuration
 webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: webhook-service
+      namespace: system
+      path: /validate-reserved-resourcequota-limitrange
+  failurePolicy: Fail
+  matchPolicy: Equivalent
+  name: reserved-resourcequota-limitrange-validator.appuio.io
+  rules:
+  - apiGroups:
+    - ""
+    apiVersions:
+    - v1
+    operations:
+    - CREATE
+    - UPDATE
+    - DELETE
+    resources:
+    - resourcequotas
+    - limitranges
+  sideEffects: None
 - admissionReviewVersions:
   - v1
   clientConfig:
diff --git a/controllers/legacy_resource_quota_controller.go b/controllers/legacy_resource_quota_controller.go
new file mode 100644
index 0000000..7a0998d
--- /dev/null
+++ b/controllers/legacy_resource_quota_controller.go
@@ -0,0 +1,140 @@
+package controllers
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"go.uber.org/multierr"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// LegacyResourceQuotaReconciler reconciles namespaces and synchronizes their resource quotas and limit ranges.
+type LegacyResourceQuotaReconciler struct {
+	client.Client
+	Scheme   *runtime.Scheme
+	Recorder record.EventRecorder
+
+	OrganizationLabel string
+
+	ResourceQuotaAnnotationBase string
+	DefaultResourceQuotas       map[string]corev1.ResourceQuotaSpec
+
+	LimitRangeName    string
+	DefaultLimitRange corev1.LimitRangeSpec
+}
+
+func (r *LegacyResourceQuotaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	l := log.FromContext(ctx)
+	l.Info("Reconciling Namespace")
+
+	var ns corev1.Namespace
+	if err := r.Get(ctx, req.NamespacedName, &ns); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+	if ns.DeletionTimestamp != nil {
+		l.Info("Namespace is being deleted, skipping reconciliation")
+		return ctrl.Result{}, nil
+	}
+
+	if _, ok := ns.Labels[r.OrganizationLabel]; !ok {
+		l.Info("Namespace does not have organization label, skipping reconciliation")
+		return ctrl.Result{}, nil
+	}
+
+	var errs []error
+	for name, s := range r.DefaultResourceQuotas {
+		spec := *s.DeepCopy()
+
+		var storageQuotas corev1.ResourceList
+		if sqa := ns.Annotations[fmt.Sprintf("%s/%s.storageclasses", r.ResourceQuotaAnnotationBase, name)]; sqa != "" {
+			err := json.Unmarshal([]byte(sqa), &storageQuotas)
+			if err != nil {
+				errs = append(errs, fmt.Errorf("failed to unmarshal storage classes: %w", err))
+				storageQuotas = make(corev1.ResourceList)
+			}
+		} else {
+			storageQuotas = make(corev1.ResourceList)
+		}
+
+		rq := &corev1.ResourceQuota{
+			ObjectMeta: ctrl.ObjectMeta{
+				Name:      name,
+				Namespace: ns.Name,
+			},
+		}
+		op, err := controllerutil.CreateOrUpdate(ctx, r.Client, rq, func() error {
+			for k := range spec.Hard {
+				an := fmt.Sprintf("%s/%s.%s", r.ResourceQuotaAnnotationBase, name, strings.ReplaceAll(string(k), "/", "_"))
+				if strings.Contains(string(k), "storageclass.storage.k8s.io") {
+					if _, ok := storageQuotas[k]; ok {
+						spec.Hard[k] = storageQuotas[k]
+					}
+				} else if a := ns.Annotations[an]; a != "" {
+					po, err := resource.ParseQuantity(a)
+					if err != nil {
+						errs = append(errs, fmt.Errorf("failed to parse quantity %s=%s: %w", an, a, err))
+						continue
+					}
+					spec.Hard[k] = po
+				}
+			}
+
+			rq.Spec = spec
+			return controllerutil.SetControllerReference(&ns, rq, r.Scheme)
+		})
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to reconcile ResourceQuota %s: %w", name, err))
+		}
+		if op != controllerutil.OperationResultNone {
+			l.Info("Reconciled ResourceQuota", "name", name, "operation", op)
+		}
+	}
+
+	lr := &corev1.LimitRange{
+		ObjectMeta: ctrl.ObjectMeta{
+			Name:      r.LimitRangeName,
+			Namespace: ns.Name,
+		},
+	}
+	op, err := controllerutil.CreateOrUpdate(ctx, r.Client, lr, func() error {
+		lr.Spec = *r.DefaultLimitRange.DeepCopy()
+		return controllerutil.SetControllerReference(&ns, lr, r.Scheme)
+	})
+	if err != nil {
+		errs = append(errs, fmt.Errorf("failed to reconcile LimitRange %s: %w", r.LimitRangeName, err))
+	}
+	if op != controllerutil.OperationResultNone {
+		l.Info("Reconciled LimitRange", "name", r.LimitRangeName, "operation", op)
+	}
+
+	if err := multierr.Combine(errs...); err != nil {
+		r.Recorder.Eventf(&ns, corev1.EventTypeWarning, "ReconcileError", "Failed to reconcile ResourceQuotas and LimitRanges: %s", err.Error())
+		return ctrl.Result{}, fmt.Errorf("failed to reconcile ResourceQuotas and LimitRanges: %w", err)
+	}
+
+	return ctrl.Result{}, nil
+}
+
+// SetupWithManager sets up the controller with the Manager.
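+// The controller watches namespaces carrying the organization label and owns
+// the generated ResourceQuota and LimitRange objects, so a namespace is
+// reconciled again (and drift reverted) whenever a synced object changes.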
+func (r *LegacyResourceQuotaReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	orgPredicate, err := labelExistsPredicate(r.OrganizationLabel)
+	if err != nil {
+		return fmt.Errorf("failed to create organization label predicate: %w", err)
+	}
+	return ctrl.NewControllerManagedBy(mgr).
+		Named("legacyresourcequota").
+		For(&corev1.Namespace{}, builder.WithPredicates(orgPredicate)).
+		Owns(&corev1.ResourceQuota{}).
+		Owns(&corev1.LimitRange{}).
+		Complete(r)
+}
diff --git a/controllers/legacy_resource_quota_controller_test.go b/controllers/legacy_resource_quota_controller_test.go
new file mode 100644
index 0000000..df851bb
--- /dev/null
+++ b/controllers/legacy_resource_quota_controller_test.go
@@ -0,0 +1,89 @@
+package controllers
+
+import (
+	"context"
+	"testing"
+
+	"github.com/go-logr/logr/testr"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+func Test_LegacyResourceQuotaReconciler_Reconcile(t *testing.T) {
+	t.Parallel()
+
+	subjectNamespace := newNamespace("test", map[string]string{"organization": "testorg"}, nil)
+
+	c, scheme, recorder := prepareClient(t, subjectNamespace)
+	ctx := log.IntoContext(context.Background(), testr.New(t))
+
+	subject := LegacyResourceQuotaReconciler{
+		Client:   c,
+		Scheme:   scheme,
+		Recorder: recorder,
+
+		OrganizationLabel: "organization",
+
+		ResourceQuotaAnnotationBase: "resourcequota.example.com",
+		DefaultResourceQuotas: map[string]corev1.ResourceQuotaSpec{
+			"orgq": {
+				Hard: corev1.ResourceList{
+					corev1.ResourceLimitsCPU:       resource.MustParse("10"),
+					corev1.ResourceRequestsMemory:  resource.MustParse("10Gi"),
+					"count/services.loadbalancers": resource.MustParse("10"),
+					"localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims": resource.MustParse("10"),
+					"cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage":    resource.MustParse("10"),
+					"openshift.io/imagestreamtags":                                          resource.MustParse("10"),
+				},
+			},
+		},
+
+		LimitRangeName: "limitrange",
+		DefaultLimitRange: corev1.LimitRangeSpec{
+			Limits: []corev1.LimitRangeItem{
+				{
+					Type: corev1.LimitTypeContainer,
+					Default: corev1.ResourceList{
+						corev1.ResourceLimitsCPU: resource.MustParse("1"),
+					},
+				},
+			},
+		},
+	}
+
+	_, err := subject.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: subjectNamespace.Name}})
+	require.NoError(t, err)
+
+	var syncedRQ corev1.ResourceQuota
+	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "orgq", Namespace: "test"}, &syncedRQ))
+	require.Equal(t, subject.DefaultResourceQuotas["orgq"], syncedRQ.Spec)
+
+	var syncedLR corev1.LimitRange
+	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "limitrange", Namespace: "test"}, &syncedLR))
+	require.Equal(t, subject.DefaultLimitRange, syncedLR.Spec)
+
+	subjectNamespace.Annotations = map[string]string{
+		"resourcequota.example.com/orgq.storageclasses":               `{"cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage":"5"}`,
+		"resourcequota.example.com/orgq.limits.cpu":                   "5",
+		"resourcequota.example.com/orgq.count_services.loadbalancers": "5",
+		"resourcequota.example.com/orgq.openshift.io_imagestreamtags": "5",
+	}
+	require.NoError(t, c.Update(ctx, subjectNamespace))
+
+	_, err = subject.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: subjectNamespace.Name}})
+	require.NoError(t, err)
+
+	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "orgq", Namespace: "test"}, &syncedRQ))
+	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard[corev1.ResourceLimitsCPU]).String())
+	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["count/services.loadbalancers"]).String())
+	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["openshift.io/imagestreamtags"]).String())
+	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage"]).String())
+	assert.Equal(t, "10", ptr.To(syncedRQ.Spec.Hard["localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims"]).String())
+	assert.Equal(t, "10Gi", ptr.To(syncedRQ.Spec.Hard[corev1.ResourceRequestsMemory]).String())
+}
diff --git a/go.mod b/go.mod
index 3bc48b9..15b49fb 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,7 @@ require (
 	k8s.io/api v0.31.0
 	k8s.io/apimachinery v0.31.0
 	k8s.io/client-go v0.31.0
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
 	sigs.k8s.io/controller-runtime v0.19.0
 	sigs.k8s.io/controller-tools v0.16.1
 	sigs.k8s.io/kind v0.24.0
@@ -86,7 +87,6 @@ require (
 	k8s.io/apiextensions-apiserver v0.31.0 // indirect
 	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 // indirect
-	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )
diff --git a/main.go b/main.go
index 0857e1c..7c1c254 100644
--- a/main.go
+++ b/main.go
@@ -3,13 +3,16 @@ package main
 import (
 	"context"
 	"flag"
+	"maps"
 	"os"
+	"slices"
 	"time"

 	controlv1 "github.com/appuio/control-api/apis/v1"
 	projectv1 "github.com/openshift/api/project/v1"
 	userv1 "github.com/openshift/api/user/v1"
 	"go.uber.org/multierr"
+	authenticationv1 "k8s.io/api/authentication/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -17,6 +20,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/cluster"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -28,6 +32,7 @@ import (
 	"github.com/appuio/appuio-cloud-agent/ratio"
 	"github.com/appuio/appuio-cloud-agent/skipper"
 	"github.com/appuio/appuio-cloud-agent/webhooks"
+	whoamicli "github.com/appuio/appuio-cloud-agent/whoami"
 )

 var (
@@ -89,6 +94,9 @@ func main() {
 	var legacyNamespaceQuotaEnabled bool
 	flag.BoolVar(&legacyNamespaceQuotaEnabled, "legacy-namespace-quota-enabled", false, "Enable the legacy namespace quota controller. This controller is deprecated and will be removed in the future.")

+	var legacyResourceQuotaEnabled bool
+	flag.BoolVar(&legacyResourceQuotaEnabled, "legacy-resource-quota-enabled", false, "Enable the legacy resource quota controller. This controller is deprecated and will be removed in the future.")
+
 	var podRunOnceActiveDeadlineSecondsMutatorEnabled bool
 	flag.BoolVar(&podRunOnceActiveDeadlineSecondsMutatorEnabled, "pod-run-once-active-deadline-seconds-mutator-enabled", false, "Enable the PodRunOnceActiveDeadlineSecondsMutator webhook. Adds .spec.activeDeadlineSeconds to pods with the restartPolicy set to 'OnFailure' or 'Never'.")
@@ -229,11 +237,27 @@ func main() {
 			os.Exit(1)
 		}
 	}
+	if legacyResourceQuotaEnabled {
+		if err := (&controllers.LegacyResourceQuotaReconciler{
+			Client:   mgr.GetClient(),
+			Scheme:   mgr.GetScheme(),
+			Recorder: mgr.GetEventRecorderFor("legacy-resource-quota-controller"),
+
+			OrganizationLabel:           conf.OrganizationLabel,
+			ResourceQuotaAnnotationBase: conf.LegacyResourceQuotaAnnotationBase,
+			DefaultResourceQuotas:       conf.LegacyDefaultResourceQuotas,
+			LimitRangeName:              conf.LegacyLimitRangeName,
+			DefaultLimitRange:           conf.LegacyDefaultLimitRange,
+		}).SetupWithManager(mgr); err != nil {
+			setupLog.Error(err, "unable to create controller", "controller", "legacy-resource-quota-controller")
+			os.Exit(1)
+		}
+	}

 	psk := &skipper.PrivilegedUserSkipper{
 		Client: mgr.GetClient(),

-		PrivilegedUsers:        conf.PrivilegedUsers,
+		PrivilegedUsers:        append(conf.PrivilegedUsers, whoami(mgr).Username),
 		PrivilegedGroups:       conf.PrivilegedGroups,
 		PrivilegedClusterRoles: conf.PrivilegedClusterRoles,
 	}
@@ -270,6 +294,19 @@ func main() {
 		},
 	})

+	mgr.GetWebhookServer().Register("/validate-reserved-resourcequota-limitrange", &webhook.Admission{
+		Handler: &webhooks.ReservedResourceQuotaLimitRangeValidator{
+			Decoder: admission.NewDecoder(mgr.GetScheme()),
+			Skipper: skipper.NewMultiSkipper(
+				skipper.StaticSkipper{ShouldSkip: !legacyResourceQuotaEnabled},
+				psk,
+			),
+
+			ReservedResourceQuotaNames: slices.Collect(maps.Keys(conf.LegacyDefaultResourceQuotas)),
+			ReservedLimitRangeNames:    []string{conf.LegacyLimitRangeName},
+		},
+	})
+
 	mgr.GetWebhookServer().Register("/mutate-namespace-project-organization", &webhook.Admission{
 		Handler: &webhooks.NamespaceProjectOrganizationMutator{
 			Decoder: admission.NewDecoder(mgr.GetScheme()),
@@ -338,6 +375,23 @@ func main() {
 	}
 }

+func whoami(mgr manager.Manager) authenticationv1.UserInfo {
+	wc, err := whoamicli.WhoamiForConfigAndClient(mgr.GetConfig(), mgr.GetHTTPClient())
+	if err != nil {
+		setupLog.Error(err, "unable to create whoami client")
+		os.Exit(1)
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	userInfo, err := wc.Whoami(ctx)
+	if err != nil {
+		setupLog.Error(err, "unable to get current user")
+		os.Exit(1)
+	}
+	setupLog.Info("I am", "userinfo", userInfo)
+	return userInfo
+}
+
 func registerNodeSelectorValidationWebhooks(mgr ctrl.Manager, conf Config) {
 	mgr.GetWebhookServer().Register("/mutate-pod-node-selector", &webhook.Admission{
 		Handler: &webhooks.PodNodeSelectorMutator{
diff --git a/webhooks/reserved_resourcequota_limitrange_validator.go b/webhooks/reserved_resourcequota_limitrange_validator.go
new file mode 100644
index 0000000..554be43
--- /dev/null
+++ b/webhooks/reserved_resourcequota_limitrange_validator.go
@@ -0,0 +1,62 @@
+package webhooks
+
+import (
+	"context"
+	"net/http"
+	"slices"
+
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+
+	"github.com/appuio/appuio-cloud-agent/skipper"
+)
+
+// +kubebuilder:webhook:path=/validate-reserved-resourcequota-limitrange,name=reserved-resourcequota-limitrange-validator.appuio.io,admissionReviewVersions=v1,sideEffects=none,mutating=false,failurePolicy=Fail,groups="",resources=resourcequotas;limitranges,verbs=create;update;delete,versions=v1,matchPolicy=equivalent
+
+// ReservedResourceQuotaLimitRangeValidator denies changes to reserved ResourceQuota and LimitRange objects.
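+// Requests accepted by the configured Skipper (e.g. from privileged users)
+// are allowed; all other CREATE, UPDATE, and DELETE requests targeting one of
+// the reserved names are denied.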
+type ReservedResourceQuotaLimitRangeValidator struct {
+	Decoder admission.Decoder
+
+	Skipper skipper.Skipper
+
+	ReservedResourceQuotaNames []string
+	ReservedLimitRangeNames    []string
+}
+
+// Handle handles the admission requests.
+func (v *ReservedResourceQuotaLimitRangeValidator) Handle(ctx context.Context, req admission.Request) admission.Response {
+	ctx = log.IntoContext(ctx, log.FromContext(ctx).
+		WithName("webhook.reserved-resourcequota-limitrange-validator.appuio.io").
+		WithValues("id", req.UID, "user", req.UserInfo.Username).
+		WithValues("namespace", req.Namespace, "name", req.Name,
+			"group", req.Kind.Group, "version", req.Kind.Version, "kind", req.Kind.Kind))
+
+	return logAdmissionResponse(ctx, v.handle(ctx, req))
+}
+
+func (v *ReservedResourceQuotaLimitRangeValidator) handle(ctx context.Context, req admission.Request) admission.Response {
+	l := log.FromContext(ctx)
+
+	skip, err := v.Skipper.Skip(ctx, req)
+	if err != nil {
+		l.Error(err, "error while checking skipper")
+		return admission.Errored(http.StatusInternalServerError, err)
+	}
+	if skip {
+		return admission.Allowed("skipped")
+	}
+
+	if req.Kind.Kind == "ResourceQuota" {
+		if slices.Contains(v.ReservedResourceQuotaNames, req.Name) {
+			return admission.Denied("reserved ResourceQuota object")
+		}
+	}
+
+	if req.Kind.Kind == "LimitRange" {
+		if slices.Contains(v.ReservedLimitRangeNames, req.Name) {
+			return admission.Denied("reserved LimitRange object")
+		}
+	}
+
+	return admission.Allowed("allowed")
+}
diff --git a/webhooks/reserved_resourcequota_limitrange_validator_test.go b/webhooks/reserved_resourcequota_limitrange_validator_test.go
new file mode 100644
index 0000000..5c53114
--- /dev/null
+++ b/webhooks/reserved_resourcequota_limitrange_validator_test.go
@@ -0,0 +1,64 @@
+package webhooks
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/appuio/appuio-cloud-agent/skipper"
+)
+
+func Test_ReservedResourceQuotaLimitRangeValidator_Handle(t *testing.T) {
+	t.Parallel()
+
+	_, scheme, decoder := prepareClient(t)
+
+	subject := ReservedResourceQuotaLimitRangeValidator{
+		Decoder: decoder,
+		Skipper: skipper.StaticSkipper{},
+
+		ReservedResourceQuotaNames: []string{"org"},
+		ReservedLimitRangeNames:    []string{"org"},
+	}
+
+	testCases := []struct {
+		name    string
+		subject client.Object
+		allowed bool
+	}{
+		{
+			name:    "LimitRange: reserved name",
+			subject: &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "org"}},
+			allowed: false,
+		},
+		{
+			name:    "LimitRange: allowed name",
+			subject: &corev1.LimitRange{ObjectMeta: metav1.ObjectMeta{Name: "not-org"}},
+			allowed: true,
+		},
+		{
+			name:    "ResourceQuota: reserved name",
+			subject: &corev1.ResourceQuota{ObjectMeta: metav1.ObjectMeta{Name: "org"}},
+			allowed: false,
+		},
+		{
+			name:    "ResourceQuota: allowed name",
+			subject: &corev1.ResourceQuota{ObjectMeta: metav1.ObjectMeta{Name: "not-org"}},
+			allowed: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
+
+			resp := subject.Handle(context.Background(), admissionRequestForObject(t, tc.subject, scheme))
+			t.Log("Response:", resp.Result.Reason, resp.Result.Message)
+			require.Equal(t, tc.allowed, resp.Allowed)
+		})
+	}
+}
diff --git a/whoami/whoami.go b/whoami/whoami.go
new file mode 100644
index 0000000..8ccd028
--- /dev/null
+++ b/whoami/whoami.go
@@ -0,0 +1,38 @@
+package whoami
+
+import (
"context" + "fmt" + "net/http" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + authenticationv1cli "k8s.io/client-go/kubernetes/typed/authentication/v1" + "k8s.io/client-go/rest" +) + +// Whoami can return the current user from a self subject review. +type Whoami struct { + Client authenticationv1cli.SelfSubjectReviewInterface +} + +// WhoamiForConfigAndClient creates a new Whoami instance for the given config and client. +func WhoamiForConfigAndClient(c *rest.Config, h *http.Client) (*Whoami, error) { + client, err := authenticationv1cli.NewForConfigAndClient(c, h) + if err != nil { + return nil, fmt.Errorf("error while creating self subject review client: %w", err) + } + return &Whoami{ + Client: client.SelfSubjectReviews(), + }, nil +} + +// Whoami returns the current user from a self subject review. +func (s *Whoami) Whoami(ctx context.Context) (authenticationv1.UserInfo, error) { + ssr, err := s.Client.Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if err != nil { + return authenticationv1.UserInfo{}, fmt.Errorf("error while creating self subject review: %w", err) + } + + return ssr.Status.UserInfo, nil +} diff --git a/whoami/whoami_test.go b/whoami/whoami_test.go new file mode 100644 index 0000000..24c77e1 --- /dev/null +++ b/whoami/whoami_test.go @@ -0,0 +1,60 @@ +package whoami_test + +import ( + "context" + "testing" + + "github.com/appuio/appuio-cloud-agent/whoami" + "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +func Test_SelfSkipper_Skip(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, clientgoscheme.AddToScheme(scheme)) + require.NoError(t, authenticationv1.AddToScheme(scheme)) + + cfg, stop := setupEnvtestEnv(t) + defer stop() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + l := testr.New(t) + + mgr, err := manager.New(cfg, manager.Options{ + Scheme: scheme, + Logger: l, + }) + require.NoError(t, err) + + subject, err := whoami.WhoamiForConfigAndClient(mgr.GetConfig(), mgr.GetHTTPClient()) + require.NoError(t, err) + + ssr, err := subject.Client.Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + t.Log(ssr) + require.NoError(t, err) + + ui, err := subject.Whoami(ctx) + assert.Equal(t, ssr.Status.UserInfo, ui) +} + +func setupEnvtestEnv(t *testing.T) (cfg *rest.Config, stop func()) { + t.Helper() + + testEnv := &envtest.Environment{} + + cfg, err := testEnv.Start() + require.NoError(t, err) + + return cfg, func() { + require.NoError(t, testEnv.Stop()) + } +}