Implement legacy ResourceQuota and LimitRange generation
Replaces https://hub.syn.tools/appuio-cloud/references/policies/11_generate_quota_limit_range_in_ns.html.

Also includes a webhook to deny edits to the synced resources.
bastjan committed Sep 4, 2024
1 parent 597c554 commit 8f9a1fe
Showing 12 changed files with 613 additions and 3 deletions.
2 changes: 1 addition & 1 deletion Makefile.vars.mk
@@ -16,7 +16,7 @@ CONTAINER_IMG ?= local.dev/$(PROJECT_OWNER)/$(PROJECT_NAME):$(IMG_TAG)

LOCALBIN ?= $(shell pwd)/bin
ENVTEST ?= $(LOCALBIN)/setup-envtest
-ENVTEST_K8S_VERSION = 1.26.1
+ENVTEST_K8S_VERSION = 1.28.3

## KIND:setup

13 changes: 13 additions & 0 deletions config.go
@@ -7,6 +7,7 @@ import (
"github.com/appuio/appuio-cloud-agent/limits"
"go.uber.org/multierr"
"gopkg.in/inf.v0"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/yaml"
)
@@ -71,6 +72,18 @@ type Config struct {
	PodRunOnceActiveDeadlineSecondsOverrideAnnotation string
	// PodRunOnceActiveDeadlineSecondsDefault is the default activeDeadlineSeconds for RunOnce pods.
	PodRunOnceActiveDeadlineSecondsDefault int

	// LegacyResourceQuotaAnnotationBase is the base of the namespace annotations used to override the default resource quotas.
	// The actual annotation is `$base/$quotaname.$resource`, with `/` in the resource name replaced by `_`,
	// for example `resourcequota.appuio.io/organization-objects.count_configmaps`.
	LegacyResourceQuotaAnnotationBase string
	// LegacyDefaultResourceQuotas is a map containing the default resource quotas created in each organization namespace.
	// The keys are the names of the generated manifests and the values are the resource quota specs.
	LegacyDefaultResourceQuotas map[string]corev1.ResourceQuotaSpec

	// LegacyLimitRangeName is the name of the default limit range.
	LegacyLimitRangeName string
	// LegacyDefaultLimitRange is the default limit range spec.
	LegacyDefaultLimitRange corev1.LimitRangeSpec
}

func ConfigFromFile(path string) (c Config, warn []string, err error) {
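For illustration, the override annotation for a given quota manifest and resource name can be derived as below. This is a minimal standalone sketch mirroring the reconciler added later in this commit; the helper name and the `main` wrapper are made up.

```go
package main

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// overrideAnnotation is a hypothetical helper showing how the per-namespace
// override annotation is built from the configured base, the quota manifest
// name and the resource name. Slashes in the resource name are replaced with
// underscores, mirroring the reconciler in this commit.
func overrideAnnotation(base, quotaName string, res corev1.ResourceName) string {
	return fmt.Sprintf("%s/%s.%s", base, quotaName, strings.ReplaceAll(string(res), "/", "_"))
}

func main() {
	// Prints: resourcequota.appuio.io/organization-objects.count_configmaps
	fmt.Println(overrideAnnotation("resourcequota.appuio.io", "organization-objects", corev1.ResourceName("count/configmaps")))
}
```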
67 changes: 67 additions & 0 deletions config.yaml
@@ -52,3 +52,70 @@ AllowedLabels: [appuio.io/organization]
PodRunOnceActiveDeadlineSecondsOverrideAnnotation: appuio.io/active-deadline-seconds-override
# PodRunOnceActiveDeadlineSecondsDefault is the default activeDeadlineSeconds for RunOnce pods.
PodRunOnceActiveDeadlineSecondsDefault: 1800

# LegacyResourceQuotaAnnotationBase is the base of the namespace annotations used to override the default resource quotas.
# The actual annotation is `$base/$quotaname.$resource`, with `/` in the resource name replaced by `_`,
# for example `resourcequota.appuio.io/organization-objects.count_configmaps`.
LegacyResourceQuotaAnnotationBase: resourcequota.appuio.io
# LegacyDefaultResourceQuotas is a map containing the default resource quotas created in each organization namespace.
# The keys are the names of the generated manifests and the values are the resource quota specs.
LegacyDefaultResourceQuotas:
  # See https://kb.vshn.ch/appuio-cloud/references/quality-requirements/performance/resource-quota.html
  organization-objects:
    hard:
      count/configmaps: "150"
      count/jobs.batch: "150"
      count/secrets: "150"
      count/services: "20"
      count/services.loadbalancers: "0"
      count/services.nodeports: "0"
      count/replicationcontrollers: "100"
      openshift.io/imagestreams: "20"
      openshift.io/imagestreamtags: "50"

      requests.storage: 1000Gi
      persistentvolumeclaims: "10"
      localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims: "0"
      requests.ephemeral-storage: "250Mi"
      limits.ephemeral-storage: "500Mi"

      # Limit the total amount of Rook-Ceph backed storage which can be
      # requested per namespace
      cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage: 25Gi
      rbd-storagepool-cluster.storageclass.storage.k8s.io/requests.storage: 25Gi

  organization-compute:
    hard:
      requests.cpu: 4
      requests.memory: 4Gi
      limits.cpu: 8
      limits.memory: 20Gi
      pods: "45"
    scopes:
      - NotTerminating

  organization-compute-terminating:
    hard:
      limits.cpu: 4000m
      limits.memory: 4Gi
      pods: "5"
      requests.cpu: 500m
      requests.memory: 2Gi
    scopes:
      - Terminating

# LegacyLimitRangeName is the name of the default limit range.
LegacyLimitRangeName: organization
# LegacyDefaultLimitRange is the default limit range spec.
LegacyDefaultLimitRange:
  limits:
    - type: Container
      min:
        cpu: "10m"
        memory: "4Mi"
        ephemeral-storage: "100Ki"
      default:
        cpu: "600m"
        memory: "768Mi"
      defaultRequest:
        cpu: "10m"
        memory: "100Mi"
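To show how these defaults interact with the override annotations, here is a hypothetical namespace that raises two of the values above; the namespace name, organization and values are examples only, not part of this commit.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleNamespace sketches a namespace with per-namespace quota overrides.
// Keys follow the `$base/$quotaname.$resource` scheme with "/" replaced by "_";
// storage-class scoped quotas are overridden via a single JSON annotation.
var exampleNamespace = corev1.Namespace{
	ObjectMeta: metav1.ObjectMeta{
		Name:   "example-namespace",
		Labels: map[string]string{"appuio.io/organization": "example-org"},
		Annotations: map[string]string{
			"resourcequota.appuio.io/organization-compute.limits.cpu":       "16",
			"resourcequota.appuio.io/organization-objects.count_configmaps": "300",
			"resourcequota.appuio.io/organization-objects.storageclasses":   `{"cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage":"50Gi"}`,
		},
	},
}

func main() {
	// Print the override annotations for illustration.
	for k, v := range exampleNamespace.Annotations {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```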
23 changes: 23 additions & 0 deletions config/webhook/manifests.yaml
@@ -94,6 +94,29 @@ kind: ValidatingWebhookConfiguration
metadata:
  name: validating-webhook-configuration
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: webhook-service
      namespace: system
      path: /validate-reserved-resourcequota-limitrange
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: reserved-resourcequota-limitrange-validator.appuio.io
  rules:
  - apiGroups:
    - ""
    apiVersions:
    - v1
    operations:
    - CREATE
    - UPDATE
    - DELETE
    resources:
    - resourcequotas
    - limitranges
  sideEffects: None
- admissionReviewVersions:
  - v1
  clientConfig:
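The admission handler served at /validate-reserved-resourcequota-limitrange is part of this commit but not of the lines shown here. The following is only a rough sketch of what such a validator could look like with controller-runtime's admission package; the type and field names are assumptions, and the real handler presumably exempts the agent's own service account.

```go
package webhooks

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// ReservedResourceQuotaLimitRangeValidator is a sketch of a validator that
// rejects changes to the ResourceQuota and LimitRange objects synced by the
// agent. Names and fields are assumptions, not the committed implementation.
type ReservedResourceQuotaLimitRangeValidator struct {
	// ReservedResourceQuotaNames could be derived from the keys of
	// LegacyDefaultResourceQuotas, ReservedLimitRangeNames from LegacyLimitRangeName.
	ReservedResourceQuotaNames []string
	ReservedLimitRangeNames    []string
}

// Handle denies CREATE, UPDATE and DELETE requests that target a reserved object.
func (v *ReservedResourceQuotaLimitRangeValidator) Handle(_ context.Context, req admission.Request) admission.Response {
	reserved := v.ReservedLimitRangeNames
	if req.Kind.Kind == "ResourceQuota" {
		reserved = v.ReservedResourceQuotaNames
	}
	for _, name := range reserved {
		if req.Name == name {
			return admission.Denied(fmt.Sprintf("%s %q is managed by the appuio-cloud-agent and must not be modified", req.Kind.Kind, req.Name))
		}
	}
	return admission.Allowed("")
}
```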
140 changes: 140 additions & 0 deletions controllers/legacy_resource_quota_controller.go
@@ -0,0 +1,140 @@
package controllers

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"go.uber.org/multierr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// LegacyResourceQuotaReconciler reconciles namespaces and synchronizes their resource quotas
type LegacyResourceQuotaReconciler struct {
	client.Client
	Scheme   *runtime.Scheme
	Recorder record.EventRecorder

	OrganizationLabel string

	ResourceQuotaAnnotationBase string
	DefaultResourceQuotas       map[string]corev1.ResourceQuotaSpec

	LimitRangeName    string
	DefaultLimitRange corev1.LimitRangeSpec
}

func (r *LegacyResourceQuotaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	l := log.FromContext(ctx)
	l.Info("Reconciling Namespace")

	var ns corev1.Namespace
	if err := r.Get(ctx, req.NamespacedName, &ns); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	if ns.DeletionTimestamp != nil {
		l.Info("Namespace is being deleted, skipping reconciliation")
		return ctrl.Result{}, nil
	}

	if _, ok := ns.Labels[r.OrganizationLabel]; !ok {
		l.Info("Namespace does not have organization label, skipping reconciliation")
		return ctrl.Result{}, nil
	}

	var errs []error
	for name, s := range r.DefaultResourceQuotas {
		spec := *s.DeepCopy()

		var storageQuotas corev1.ResourceList
		if sqa := ns.Annotations[fmt.Sprintf("%s/%s.storageclasses", r.ResourceQuotaAnnotationBase, name)]; sqa != "" {
			err := json.Unmarshal([]byte(ns.Annotations[fmt.Sprintf("%s/%s.storageclasses", r.ResourceQuotaAnnotationBase, name)]), &storageQuotas)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to unmarshal storage classes: %w", err))
				storageQuotas = make(corev1.ResourceList)
			}
		} else {
			storageQuotas = make(corev1.ResourceList)
		}

		rq := &corev1.ResourceQuota{
			ObjectMeta: ctrl.ObjectMeta{
				Name:      name,
				Namespace: ns.Name,
			},
		}
		op, err := controllerutil.CreateOrUpdate(ctx, r.Client, rq, func() error {
			for k := range spec.Hard {
				an := fmt.Sprintf("%s/%s.%s", r.ResourceQuotaAnnotationBase, name, strings.ReplaceAll(string(k), "/", "_"))
				if strings.Contains(string(k), "storageclass.storage.k8s.io") {
					if _, ok := storageQuotas[k]; ok {
						spec.Hard[k] = storageQuotas[k]
					}
				} else if a := ns.Annotations[an]; a != "" {
					po, err := resource.ParseQuantity(a)
					if err != nil {
						errs = append(errs, fmt.Errorf("failed to parse quantity %s=%s: %w", an, a, err))
						continue
					}
					spec.Hard[k] = po
				}
			}

			rq.Spec = spec
			return controllerutil.SetControllerReference(&ns, rq, r.Scheme)
		})
		if err != nil {
			errs = append(errs, fmt.Errorf("failed to reconcile ResourceQuota %s: %w", name, err))
		}
		if op != controllerutil.OperationResultNone {
			l.Info("Reconciled ResourceQuota", "name", name, "operation", op)
		}
	}

	lr := &corev1.LimitRange{
		ObjectMeta: ctrl.ObjectMeta{
			Name:      r.LimitRangeName,
			Namespace: ns.Name,
		},
	}
	op, err := controllerutil.CreateOrUpdate(ctx, r.Client, lr, func() error {
		lr.Spec = *r.DefaultLimitRange.DeepCopy()
		return controllerutil.SetControllerReference(&ns, lr, r.Scheme)
	})
	if err != nil {
		errs = append(errs, fmt.Errorf("failed to reconcile LimitRange %s: %w", r.LimitRangeName, err))
	}
	if op != controllerutil.OperationResultNone {
		l.Info("Reconciled LimitRange", "name", r.LimitRangeName, "operation", op)
	}

	if err := multierr.Combine(errs...); err != nil {
		r.Recorder.Eventf(&ns, corev1.EventTypeWarning, "ReconcileError", "Failed to reconcile ResourceQuotas and LimitRanges: %s", err.Error())
		return ctrl.Result{}, fmt.Errorf("failed to reconcile ResourceQuotas and LimitRanges: %w", err)
	}

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *LegacyResourceQuotaReconciler) SetupWithManager(mgr ctrl.Manager) error {
	orgPredicate, err := labelExistsPredicate(r.OrganizationLabel)
	if err != nil {
		return fmt.Errorf("failed to create organization label predicate: %w", err)
	}
	return ctrl.NewControllerManagedBy(mgr).
		Named("legacyresourcequota").
		For(&corev1.Namespace{}, builder.WithPredicates(orgPredicate)).
		Owns(&corev1.ResourceQuota{}).
		Owns(&corev1.LimitRange{}).
		Complete(r)
}
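The registration of this reconciler with the manager happens in main.go, which is not among the files shown above. A minimal sketch of such wiring could look like the following, with literal values standing in for the Legacy* config fields that the real agent reads from its Config:

```go
package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/appuio/appuio-cloud-agent/controllers"
)

func main() {
	setupLog := ctrl.Log.WithName("setup")

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
	if err != nil {
		setupLog.Error(err, "unable to create manager")
		os.Exit(1)
	}

	// In the real agent these values come from the Legacy* fields of the loaded Config.
	if err := (&controllers.LegacyResourceQuotaReconciler{
		Client:   mgr.GetClient(),
		Scheme:   mgr.GetScheme(),
		Recorder: mgr.GetEventRecorderFor("legacy-resource-quota-controller"),

		OrganizationLabel: "appuio.io/organization",

		ResourceQuotaAnnotationBase: "resourcequota.appuio.io",
		DefaultResourceQuotas: map[string]corev1.ResourceQuotaSpec{
			"organization-objects": {
				Hard: corev1.ResourceList{"count/configmaps": resource.MustParse("150")},
			},
		},

		LimitRangeName:    "organization",
		DefaultLimitRange: corev1.LimitRangeSpec{},
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to set up LegacyResourceQuotaReconciler")
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		setupLog.Error(err, "manager exited with error")
		os.Exit(1)
	}
}
```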
89 changes: 89 additions & 0 deletions controllers/legacy_resource_quota_controller_test.go
@@ -0,0 +1,89 @@
package controllers

import (
	"context"
	"testing"

	"github.com/go-logr/logr/testr"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func Test_LegacyResourceQuotaReconciler_Reconcile(t *testing.T) {
	t.Parallel()

	subjectNamespace := newNamespace("test", map[string]string{"organization": "testorg"}, nil)

	c, scheme, recorder := prepareClient(t, subjectNamespace)
	ctx := log.IntoContext(context.Background(), testr.New(t))

	subject := LegacyResourceQuotaReconciler{
		Client:   c,
		Scheme:   scheme,
		Recorder: recorder,

		OrganizationLabel: "organization",

		ResourceQuotaAnnotationBase: "resourcequota.example.com",
		DefaultResourceQuotas: map[string]corev1.ResourceQuotaSpec{
			"orgq": {
				Hard: corev1.ResourceList{
					corev1.ResourceLimitsCPU:       resource.MustParse("10"),
					corev1.ResourceRequestsMemory:  resource.MustParse("10Gi"),
					"count/services.loadbalancers": resource.MustParse("10"),
					"localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims": resource.MustParse("10"),
					"cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage":    resource.MustParse("10"),
					"openshift.io/imagestreamtags":                                          resource.MustParse("10"),
				},
			},
		},

		LimitRangeName: "limitrange",
		DefaultLimitRange: corev1.LimitRangeSpec{
			Limits: []corev1.LimitRangeItem{
				{
					Type: corev1.LimitTypeContainer,
					Default: corev1.ResourceList{
						corev1.ResourceLimitsCPU: resource.MustParse("1"),
					},
				},
			},
		},
	}

	_, err := subject.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: subjectNamespace.Name}})
	require.NoError(t, err)

	var syncedRQ corev1.ResourceQuota
	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "orgq", Namespace: "test"}, &syncedRQ))
	require.Equal(t, subject.DefaultResourceQuotas["orgq"], syncedRQ.Spec)

	var syncedLR corev1.LimitRange
	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "limitrange", Namespace: "test"}, &syncedLR))
	require.Equal(t, subject.DefaultLimitRange, syncedLR.Spec)

	subjectNamespace.Annotations = map[string]string{
		"resourcequota.example.com/orgq.storageclasses":               `{"cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage":"5"}`,
		"resourcequota.example.com/orgq.limits.cpu":                   "5",
		"resourcequota.example.com/orgq.count_services.loadbalancers": "5",
		"resourcequota.example.com/orgq.openshift.io_imagestreamtags": "5",
	}
	require.NoError(t, c.Update(ctx, subjectNamespace))

	_, err = subject.Reconcile(ctx, ctrl.Request{NamespacedName: types.NamespacedName{Name: subjectNamespace.Name}})
	require.NoError(t, err)

	require.NoError(t, c.Get(ctx, types.NamespacedName{Name: "orgq", Namespace: "test"}, &syncedRQ))
	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard[corev1.ResourceLimitsCPU]).String())
	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["count/services.loadbalancers"]).String())
	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["openshift.io/imagestreamtags"]).String())
	assert.Equal(t, "5", ptr.To(syncedRQ.Spec.Hard["cephfs-fspool-cluster.storageclass.storage.k8s.io/requests.storage"]).String())
	assert.Equal(t, "10", ptr.To(syncedRQ.Spec.Hard["localblock-storage.storageclass.storage.k8s.io/persistentvolumeclaims"]).String())
	assert.Equal(t, "10Gi", ptr.To(syncedRQ.Spec.Hard[corev1.ResourceRequestsMemory]).String())
}
2 changes: 1 addition & 1 deletion go.mod
@@ -16,6 +16,7 @@ require (
	k8s.io/api v0.31.0
	k8s.io/apimachinery v0.31.0
	k8s.io/client-go v0.31.0
+	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
	sigs.k8s.io/controller-runtime v0.19.0
	sigs.k8s.io/controller-tools v0.16.1
	sigs.k8s.io/kind v0.24.0
@@ -86,7 +87,6 @@ require (
	k8s.io/apiextensions-apiserver v0.31.0 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 // indirect
-	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
)
