From f474e7732d591e0db247d5890588c78b1f6e92be Mon Sep 17 00:00:00 2001 From: nb-ohad Date: Sun, 28 Jul 2024 13:41:48 +0300 Subject: [PATCH 1/5] Rename configs.csi.ceph.io API to clientprofiles.csi.ceph.io Note: Design doc alignment will be provided in a future commit for all API changes together Signed-off-by: nb-ohad --- PROJECT | 2 +- ...config_types.go => clientprofile_types.go} | 43 ++++++++++++------- api/v1alpha1/zz_generated.deepcopy.go | 38 ++++++++-------- ...s.yaml => csi.ceph.io_clientprofiles.yaml} | 22 ++++++---- config/crd/kustomization.yaml | 4 +- ...le.yaml => clientprofile_editor_role.yaml} | 8 ++-- ...le.yaml => clientprofile_viewer_role.yaml} | 8 ++-- config/rbac/kustomization.yaml | 4 +- ...g.yaml => csi_v1alpha1_clientprofile.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- internal/controller/config_controller.go | 2 +- internal/controller/config_controller_test.go | 6 +-- 12 files changed, 81 insertions(+), 62 deletions(-) rename api/v1alpha1/{config_types.go => clientprofile_types.go} (56%) rename config/crd/bases/{csi.ceph.io_configs.yaml => csi.ceph.io_clientprofiles.yaml} (83%) rename config/rbac/{config_editor_role.yaml => clientprofile_editor_role.yaml} (72%) rename config/rbac/{config_viewer_role.yaml => clientprofile_viewer_role.yaml} (70%) rename config/samples/{csi_v1alpha1_config.yaml => csi_v1alpha1_clientprofile.yaml} (78%) diff --git a/PROJECT b/PROJECT index d2bb5b9d..7cfd9c66 100644 --- a/PROJECT +++ b/PROJECT @@ -39,7 +39,7 @@ resources: namespaced: true domain: ceph.io group: csi - kind: Config + kind: ClientProfile path: github.com/ceph/ceph-csi-operator/api/v1alpha1 version: v1alpha1 version: "3" diff --git a/api/v1alpha1/config_types.go b/api/v1alpha1/clientprofile_types.go similarity index 56% rename from api/v1alpha1/config_types.go rename to api/v1alpha1/clientprofile_types.go index 70e144a0..3bcb79f0 100644 --- a/api/v1alpha1/config_types.go +++ b/api/v1alpha1/clientprofile_types.go @@ -23,11 +23,13 @@ import ( // CephFsConfigSpec defines the desired CephFs configuration type CephFsConfigSpec struct { + //+kubebuilder:validation:Optional SubVolumeGroup string `json:"subVolumeGroup,omitempty"` } // RbdConfigSpec defines the desired RBD configuration type RbdConfigSpec struct { + //+kubebuilder:validation:Optional RadosNamespace string `json:"radosNamespace,omitempty"` } @@ -35,39 +37,50 @@ type RbdConfigSpec struct { type NfsConfigSpec struct { } -// ConfigSpec defines the desired state of Config -type ConfigSpec struct { +// ClientProfileSpec defines the desired state of Ceph CSI +// configuration for volumes and snapshots configured to use +// this profile +type ClientProfileSpec struct { + //+kubebuilder:validation:Required CephClusterRef corev1.LocalObjectReference `json:"cephClusterRef"` - CephFs *CephFsConfigSpec `json:"cephFs,omitempty"` - Rbd *RbdConfigSpec `json:"rbd,omitempty"` - Nfs *NfsConfigSpec `json:"nfs,omitempty"` + + //+kubebuilder:validation:Optional + CephFs *CephFsConfigSpec `json:"cephFs,omitempty"` + + //+kubebuilder:validation:Optional + Rbd *RbdConfigSpec `json:"rbd,omitempty"` + + //+kubebuilder:validation:Optional + Nfs *NfsConfigSpec `json:"nfs,omitempty"` } -// ConfigStatus defines the observed state of Config -type ConfigStatus struct { +// ClientProfileStatus defines the observed state of Ceph CSI +// configuration for volumes and snapshots configured to use +// this profile +type ClientProfileStatus struct { } //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// Config is the Schema for the 
configs API -type Config struct { +// ClientProfile is the Schema for the clientprofiles API +type ClientProfile struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ConfigSpec `json:"spec,omitempty"` - Status ConfigStatus `json:"status,omitempty"` + Spec ClientProfileSpec `json:"spec,omitempty"` + Status ClientProfileStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true -// ConfigList contains a list of Config -type ConfigList struct { +// ClientProfileList contains a list of ClientProfile +type ClientProfileList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []Config `json:"items"` + Items []ClientProfile `json:"items"` } func init() { - SchemeBuilder.Register(&Config{}, &ConfigList{}) + SchemeBuilder.Register(&ClientProfile{}, &ClientProfileList{}) } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 70d8621f..8ffddcd7 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -137,7 +137,7 @@ func (in *CephFsConfigSpec) DeepCopy() *CephFsConfigSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Config) DeepCopyInto(out *Config) { +func (in *ClientProfile) DeepCopyInto(out *ClientProfile) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -145,18 +145,18 @@ func (in *Config) DeepCopyInto(out *Config) { out.Status = in.Status } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientProfile. +func (in *ClientProfile) DeepCopy() *ClientProfile { if in == nil { return nil } - out := new(Config) + out := new(ClientProfile) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Config) DeepCopyObject() runtime.Object { +func (in *ClientProfile) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -164,31 +164,31 @@ func (in *Config) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigList) DeepCopyInto(out *ConfigList) { +func (in *ClientProfileList) DeepCopyInto(out *ClientProfileList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Config, len(*in)) + *out = make([]ClientProfile, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. -func (in *ConfigList) DeepCopy() *ConfigList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientProfileList. +func (in *ClientProfileList) DeepCopy() *ClientProfileList { if in == nil { return nil } - out := new(ConfigList) + out := new(ClientProfileList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ConfigList) DeepCopyObject() runtime.Object { +func (in *ClientProfileList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -196,7 +196,7 @@ func (in *ConfigList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { +func (in *ClientProfileSpec) DeepCopyInto(out *ClientProfileSpec) { *out = *in out.CephClusterRef = in.CephClusterRef if in.CephFs != nil { @@ -216,27 +216,27 @@ func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec. -func (in *ConfigSpec) DeepCopy() *ConfigSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientProfileSpec. +func (in *ClientProfileSpec) DeepCopy() *ClientProfileSpec { if in == nil { return nil } - out := new(ConfigSpec) + out := new(ClientProfileSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { +func (in *ClientProfileStatus) DeepCopyInto(out *ClientProfileStatus) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. -func (in *ConfigStatus) DeepCopy() *ConfigStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientProfileStatus. +func (in *ClientProfileStatus) DeepCopy() *ClientProfileStatus { if in == nil { return nil } - out := new(ConfigStatus) + out := new(ClientProfileStatus) in.DeepCopyInto(out) return out } diff --git a/config/crd/bases/csi.ceph.io_configs.yaml b/config/crd/bases/csi.ceph.io_clientprofiles.yaml similarity index 83% rename from config/crd/bases/csi.ceph.io_configs.yaml rename to config/crd/bases/csi.ceph.io_clientprofiles.yaml index 69cfb0e2..9d285548 100644 --- a/config/crd/bases/csi.ceph.io_configs.yaml +++ b/config/crd/bases/csi.ceph.io_clientprofiles.yaml @@ -4,20 +4,20 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.14.0 - name: configs.csi.ceph.io + name: clientprofiles.csi.ceph.io spec: group: csi.ceph.io names: - kind: Config - listKind: ConfigList - plural: configs - singular: config + kind: ClientProfile + listKind: ClientProfileList + plural: clientprofiles + singular: clientprofile scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: - description: Config is the Schema for the configs API + description: ClientProfile is the Schema for the clientprofiles API properties: apiVersion: description: |- @@ -37,7 +37,10 @@ spec: metadata: type: object spec: - description: ConfigSpec defines the desired state of Config + description: |- + ClientProfileSpec defines the desired state of Ceph CSI + configuration for volumes and snapshots configured to use + this profile properties: cephClusterRef: description: |- @@ -76,7 +79,10 @@ spec: - cephClusterRef type: object status: - description: ConfigStatus defines the observed state of Config + description: |- + ClientProfileStatus defines the observed state of Ceph CSI + configuration for volumes and snapshots configured to use + this profile type: object type: object served: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index e0e33a98..716378ad 100644 --- 
a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,7 +5,7 @@ resources: - bases/csi.ceph.io_drivers.yaml - bases/csi.ceph.io_operatorconfigs.yaml - bases/csi.ceph.io_cephclusters.yaml -- bases/csi.ceph.io_configs.yaml +- bases/csi.ceph.io_clientprofiles.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -18,7 +18,7 @@ patches: #- path: patches/cainjection_in_drivers.yaml #- path: patches/cainjection_in_operatorconfigs.yaml #- path: patches/cainjection_in_cephclusters.yaml -#- path: patches/cainjection_in_configs.yaml +#- path: patches/cainjection_in_clientprofiles.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # [WEBHOOK] To enable webhook, uncomment the following section diff --git a/config/rbac/config_editor_role.yaml b/config/rbac/clientprofile_editor_role.yaml similarity index 72% rename from config/rbac/config_editor_role.yaml rename to config/rbac/clientprofile_editor_role.yaml index 41988c08..003d63ca 100644 --- a/config/rbac/config_editor_role.yaml +++ b/config/rbac/clientprofile_editor_role.yaml @@ -1,16 +1,16 @@ -# permissions for end users to edit configs. +# permissions for end users to edit clientprofiles. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: config-editor-role + name: clientprofiles-editor-role rules: - apiGroups: - csi.ceph.io resources: - - configs + - clientprofiles verbs: - create - delete @@ -22,6 +22,6 @@ rules: - apiGroups: - csi.ceph.io resources: - - configs/status + - clientprofiles/status verbs: - get diff --git a/config/rbac/config_viewer_role.yaml b/config/rbac/clientprofile_viewer_role.yaml similarity index 70% rename from config/rbac/config_viewer_role.yaml rename to config/rbac/clientprofile_viewer_role.yaml index 1fc523d6..15a9f92e 100644 --- a/config/rbac/config_viewer_role.yaml +++ b/config/rbac/clientprofile_viewer_role.yaml @@ -1,16 +1,16 @@ -# permissions for end users to view configs. +# permissions for end users to view clientprofiles. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: config-viewer-role + name: clientprofile-viewer-role rules: - apiGroups: - csi.ceph.io resources: - - configs + - clientprofiles verbs: - get - list @@ -18,6 +18,6 @@ rules: - apiGroups: - csi.ceph.io resources: - - configs/status + - clientprofiles/status verbs: - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index c26ae597..d2344c06 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -22,8 +22,8 @@ resources: # if you do not want those helpers be installed with your Project. 
- cephcluster_editor_role.yaml - cephcluster_viewer_role.yaml -- config_editor_role.yaml -- config_viewer_role.yaml +- clientprofile_editor_role.yaml +- clientprofile_viewer_role.yaml - operatorconfig_editor_role.yaml - operatorconfig_viewer_role.yaml - driver_editor_role.yaml diff --git a/config/samples/csi_v1alpha1_config.yaml b/config/samples/csi_v1alpha1_clientprofile.yaml similarity index 78% rename from config/samples/csi_v1alpha1_config.yaml rename to config/samples/csi_v1alpha1_clientprofile.yaml index 6ef74392..81c5f874 100644 --- a/config/samples/csi_v1alpha1_config.yaml +++ b/config/samples/csi_v1alpha1_clientprofile.yaml @@ -1,9 +1,9 @@ apiVersion: csi.ceph.io/v1alpha1 -kind: Config +kind: ClientProfile metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: config-sample + name: clientprofile-sample spec: # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index a4712c2c..0b020933 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -3,5 +3,5 @@ resources: - csi_v1alpha1_driver.yaml - csi_v1alpha1_operatorconfig.yaml - csi_v1alpha1_cephcluster.yaml -- csi_v1alpha1_config.yaml +- csi_v1alpha1_clientprofile.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/internal/controller/config_controller.go b/internal/controller/config_controller.go index 37b9ff37..f90a1010 100644 --- a/internal/controller/config_controller.go +++ b/internal/controller/config_controller.go @@ -57,6 +57,6 @@ func (r *ConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr // SetupWithManager sets up the controller with the Manager. func (r *ConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&csiv1alpha1.Config{}). + For(&csiv1alpha1.ClientProfile{}). Complete(r) } diff --git a/internal/controller/config_controller_test.go b/internal/controller/config_controller_test.go index d01c16be..fc945916 100644 --- a/internal/controller/config_controller_test.go +++ b/internal/controller/config_controller_test.go @@ -40,13 +40,13 @@ var _ = Describe("Config Controller", func() { Name: resourceName, Namespace: "default", // TODO(user):Modify as needed } - config := &csiv1alpha1.Config{} + config := &csiv1alpha1.ClientProfile{} BeforeEach(func() { By("creating the custom resource for the Kind Config") err := k8sClient.Get(ctx, typeNamespacedName, config) if err != nil && errors.IsNotFound(err) { - resource := &csiv1alpha1.Config{ + resource := &csiv1alpha1.ClientProfile{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName, Namespace: "default", @@ -59,7 +59,7 @@ var _ = Describe("Config Controller", func() { AfterEach(func() { // TODO(user): Cleanup logic after each test, like removing the resource instance. 
- resource := &csiv1alpha1.Config{} + resource := &csiv1alpha1.ClientProfile{} err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) From ef334af937e39fdd97ea635b6161e1d90ccd3856 Mon Sep 17 00:00:00 2001 From: nb-ohad Date: Sun, 28 Jul 2024 15:24:09 +0300 Subject: [PATCH 2/5] Rename cephclusters.csi.ceph.io API to cephconnections.csi.ceph.io Note: Design doc alignment will be provided in a future commit for all API changes together Signed-off-by: nb-ohad --- PROJECT | 2 +- ...uster_types.go => cephconnection_types.go} | 39 +++++++++++------- api/v1alpha1/clientprofile_types.go | 2 +- api/v1alpha1/zz_generated.deepcopy.go | 40 +++++++++---------- ....yaml => csi.ceph.io_cephconnections.yaml} | 19 +++++---- .../crd/bases/csi.ceph.io_clientprofiles.yaml | 4 +- config/crd/kustomization.yaml | 4 +- ...e.yaml => cephconnection_editor_role.yaml} | 8 ++-- ...e.yaml => cephconnection_viewer_role.yaml} | 8 ++-- config/rbac/kustomization.yaml | 4 +- ....yaml => csi_v1alpha1_cephconnection.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- test/scripts/collect-logs.sh | 2 +- 13 files changed, 75 insertions(+), 63 deletions(-) rename api/v1alpha1/{cephcluster_types.go => cephconnection_types.go} (51%) rename config/crd/bases/{csi.ceph.io_cephclusters.yaml => csi.ceph.io_cephconnections.yaml} (79%) rename config/rbac/{cephcluster_editor_role.yaml => cephconnection_editor_role.yaml} (72%) rename config/rbac/{cephcluster_viewer_role.yaml => cephconnection_viewer_role.yaml} (69%) rename config/samples/{csi_v1alpha1_cephcluster.yaml => csi_v1alpha1_cephconnection.yaml} (78%) diff --git a/PROJECT b/PROJECT index 7cfd9c66..40900f8e 100644 --- a/PROJECT +++ b/PROJECT @@ -30,7 +30,7 @@ resources: namespaced: true domain: ceph.io group: csi - kind: CephCluster + kind: CephConnection controller: true path: github.com/ceph/ceph-csi-operator/api/v1alpha1 version: v1alpha1 diff --git a/api/v1alpha1/cephcluster_types.go b/api/v1alpha1/cephconnection_types.go similarity index 51% rename from api/v1alpha1/cephcluster_types.go rename to api/v1alpha1/cephconnection_types.go index c064003c..fc537ac3 100644 --- a/api/v1alpha1/cephcluster_types.go +++ b/api/v1alpha1/cephconnection_types.go @@ -22,41 +22,50 @@ import ( // ReadAffinitySpec capture Ceph CSI read affinity settings type ReadAffinitySpec struct { + //+kubebuilder:validation:Required + //+kubebuilder:validation:MinItems:=1 CrushLocationLabels []string `json:"crushLocationLabels,omitempty"` } -// CephClusterSpec defines the desired state of CephCluster -type CephClusterSpec struct { - Monitors []string `json:"monitors"` - ReadAffinity ReadAffinitySpec `json:"readAffinity,omitempty"` - RbdMirrorDaemonCount int `json:"rbdMirrorDaemonCount,omitempty"` +// CephConnectionSpec defines the desired state of CephConnection +type CephConnectionSpec struct { + //+kubebuilder:validation:Required + //+kubebuilder:validation:MinItems:=1 + Monitors []string `json:"monitors"` + + //+kubebuilder:validation:Optional + ReadAffinity ReadAffinitySpec `json:"readAffinity,omitempty"` + + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=1 + RbdMirrorDaemonCount int `json:"rbdMirrorDaemonCount,omitempty"` } -// CephClusterStatus defines the observed state of CephCluster -type CephClusterStatus struct { +// CephConnectionStatus defines the observed state of CephConnection +type CephConnectionStatus struct { } //+kubebuilder:object:root=true //+kubebuilder:subresource:status -// CephCluster is the Schema for the cephclusters API 
-type CephCluster struct { +// CephConnection is the Schema for the cephconnections API +type CephConnection struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec CephClusterSpec `json:"spec,omitempty"` - Status CephClusterStatus `json:"status,omitempty"` + Spec CephConnectionSpec `json:"spec,omitempty"` + Status CephConnectionStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true -// CephClusterList contains a list of CephCluster -type CephClusterList struct { +// CephConnectionList contains a list of CephConnections +type CephConnectionList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []CephCluster `json:"items"` + Items []CephConnection `json:"items"` } func init() { - SchemeBuilder.Register(&CephCluster{}, &CephClusterList{}) + SchemeBuilder.Register(&CephConnection{}, &CephConnectionList{}) } diff --git a/api/v1alpha1/clientprofile_types.go b/api/v1alpha1/clientprofile_types.go index 3bcb79f0..c50917c9 100644 --- a/api/v1alpha1/clientprofile_types.go +++ b/api/v1alpha1/clientprofile_types.go @@ -42,7 +42,7 @@ type NfsConfigSpec struct { // this profile type ClientProfileSpec struct { //+kubebuilder:validation:Required - CephClusterRef corev1.LocalObjectReference `json:"cephClusterRef"` + CephConnectionRef corev1.LocalObjectReference `json:"cephConnectionRef"` //+kubebuilder:validation:Optional CephFs *CephFsConfigSpec `json:"cephFs,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 8ffddcd7..b2881b1b 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -27,7 +27,7 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephCluster) DeepCopyInto(out *CephCluster) { +func (in *CephConnection) DeepCopyInto(out *CephConnection) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -35,18 +35,18 @@ func (in *CephCluster) DeepCopyInto(out *CephCluster) { out.Status = in.Status } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephCluster. -func (in *CephCluster) DeepCopy() *CephCluster { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephConnection. +func (in *CephConnection) DeepCopy() *CephConnection { if in == nil { return nil } - out := new(CephCluster) + out := new(CephConnection) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephCluster) DeepCopyObject() runtime.Object { +func (in *CephConnection) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -54,31 +54,31 @@ func (in *CephCluster) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClusterList) DeepCopyInto(out *CephClusterList) { +func (in *CephConnectionList) DeepCopyInto(out *CephConnectionList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]CephCluster, len(*in)) + *out = make([]CephConnection, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterList. 
-func (in *CephClusterList) DeepCopy() *CephClusterList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephConnectionList. +func (in *CephConnectionList) DeepCopy() *CephConnectionList { if in == nil { return nil } - out := new(CephClusterList) + out := new(CephConnectionList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CephClusterList) DeepCopyObject() runtime.Object { +func (in *CephConnectionList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -86,7 +86,7 @@ func (in *CephClusterList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClusterSpec) DeepCopyInto(out *CephClusterSpec) { +func (in *CephConnectionSpec) DeepCopyInto(out *CephConnectionSpec) { *out = *in if in.Monitors != nil { in, out := &in.Monitors, &out.Monitors @@ -96,27 +96,27 @@ func (in *CephClusterSpec) DeepCopyInto(out *CephClusterSpec) { in.ReadAffinity.DeepCopyInto(&out.ReadAffinity) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterSpec. -func (in *CephClusterSpec) DeepCopy() *CephClusterSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephConnectionSpec. +func (in *CephConnectionSpec) DeepCopy() *CephConnectionSpec { if in == nil { return nil } - out := new(CephClusterSpec) + out := new(CephConnectionSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephClusterStatus) DeepCopyInto(out *CephClusterStatus) { +func (in *CephConnectionStatus) DeepCopyInto(out *CephConnectionStatus) { *out = *in } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephClusterStatus. -func (in *CephClusterStatus) DeepCopy() *CephClusterStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephConnectionStatus. +func (in *CephConnectionStatus) DeepCopy() *CephConnectionStatus { if in == nil { return nil } - out := new(CephClusterStatus) + out := new(CephConnectionStatus) in.DeepCopyInto(out) return out } @@ -198,7 +198,7 @@ func (in *ClientProfileList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClientProfileSpec) DeepCopyInto(out *ClientProfileSpec) { *out = *in - out.CephClusterRef = in.CephClusterRef + out.CephConnectionRef = in.CephConnectionRef if in.CephFs != nil { in, out := &in.CephFs, &out.CephFs *out = new(CephFsConfigSpec) diff --git a/config/crd/bases/csi.ceph.io_cephclusters.yaml b/config/crd/bases/csi.ceph.io_cephconnections.yaml similarity index 79% rename from config/crd/bases/csi.ceph.io_cephclusters.yaml rename to config/crd/bases/csi.ceph.io_cephconnections.yaml index f827b362..373ebf86 100644 --- a/config/crd/bases/csi.ceph.io_cephclusters.yaml +++ b/config/crd/bases/csi.ceph.io_cephconnections.yaml @@ -4,20 +4,20 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.14.0 - name: cephclusters.csi.ceph.io + name: cephconnections.csi.ceph.io spec: group: csi.ceph.io names: - kind: CephCluster - listKind: CephClusterList - plural: cephclusters - singular: cephcluster + kind: CephConnection + listKind: CephConnectionList + plural: cephconnections + singular: cephconnection scope: Namespaced versions: - name: v1alpha1 schema: openAPIV3Schema: - description: CephCluster is the Schema for the cephclusters API + description: CephConnection is the Schema for the cephconnections API properties: apiVersion: description: |- @@ -37,13 +37,15 @@ spec: metadata: type: object spec: - description: CephClusterSpec defines the desired state of CephCluster + description: CephConnectionSpec defines the desired state of CephConnection properties: monitors: items: type: string + minItems: 1 type: array rbdMirrorDaemonCount: + minimum: 1 type: integer readAffinity: description: ReadAffinitySpec capture Ceph CSI read affinity settings @@ -51,13 +53,14 @@ spec: crushLocationLabels: items: type: string + minItems: 1 type: array type: object required: - monitors type: object status: - description: CephClusterStatus defines the observed state of CephCluster + description: CephConnectionStatus defines the observed state of CephConnection type: object type: object served: true diff --git a/config/crd/bases/csi.ceph.io_clientprofiles.yaml b/config/crd/bases/csi.ceph.io_clientprofiles.yaml index 9d285548..5c78b8ed 100644 --- a/config/crd/bases/csi.ceph.io_clientprofiles.yaml +++ b/config/crd/bases/csi.ceph.io_clientprofiles.yaml @@ -42,7 +42,7 @@ spec: configuration for volumes and snapshots configured to use this profile properties: - cephClusterRef: + cephConnectionRef: description: |- LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
@@ -76,7 +76,7 @@ spec: type: string type: object required: - - cephClusterRef + - cephConnectionRef type: object status: description: |- diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 716378ad..5fee13a8 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,8 +4,8 @@ resources: - bases/csi.ceph.io_drivers.yaml - bases/csi.ceph.io_operatorconfigs.yaml -- bases/csi.ceph.io_cephclusters.yaml - bases/csi.ceph.io_clientprofiles.yaml +- bases/csi.ceph.io_cephconnections.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -17,7 +17,7 @@ patches: # patches here are for enabling the CA injection for each CRD #- path: patches/cainjection_in_drivers.yaml #- path: patches/cainjection_in_operatorconfigs.yaml -#- path: patches/cainjection_in_cephclusters.yaml +#- path: patches/cainjection_in_cephconnections.yaml #- path: patches/cainjection_in_clientprofiles.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch diff --git a/config/rbac/cephcluster_editor_role.yaml b/config/rbac/cephconnection_editor_role.yaml similarity index 72% rename from config/rbac/cephcluster_editor_role.yaml rename to config/rbac/cephconnection_editor_role.yaml index 080a7b99..bdb132e3 100644 --- a/config/rbac/cephcluster_editor_role.yaml +++ b/config/rbac/cephconnection_editor_role.yaml @@ -1,16 +1,16 @@ -# permissions for end users to edit cephclusters. +# permissions for end users to edit cephconnections. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: cephcluster-editor-role + name: cephconnections-editor-role rules: - apiGroups: - csi.ceph.io resources: - - cephclusters + - cephconnections verbs: - create - delete @@ -22,6 +22,6 @@ rules: - apiGroups: - csi.ceph.io resources: - - cephclusters/status + - cephconnections/status verbs: - get diff --git a/config/rbac/cephcluster_viewer_role.yaml b/config/rbac/cephconnection_viewer_role.yaml similarity index 69% rename from config/rbac/cephcluster_viewer_role.yaml rename to config/rbac/cephconnection_viewer_role.yaml index bcfe86ca..c1cfec1e 100644 --- a/config/rbac/cephcluster_viewer_role.yaml +++ b/config/rbac/cephconnection_viewer_role.yaml @@ -1,16 +1,16 @@ -# permissions for end users to view cephclusters. +# permissions for end users to view cephconnections. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: cephcluster-viewer-role + name: cephconnection-viewer-role rules: - apiGroups: - csi.ceph.io resources: - - cephclusters + - cephconnections verbs: - get - list @@ -18,6 +18,6 @@ rules: - apiGroups: - csi.ceph.io resources: - - cephclusters/status + - cephconnections/status verbs: - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index d2344c06..6905e4f0 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -20,8 +20,8 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the Project itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
-- cephcluster_editor_role.yaml -- cephcluster_viewer_role.yaml +- cephconnection_editor_role.yaml +- cephconnection_viewer_role.yaml - clientprofile_editor_role.yaml - clientprofile_viewer_role.yaml - operatorconfig_editor_role.yaml diff --git a/config/samples/csi_v1alpha1_cephcluster.yaml b/config/samples/csi_v1alpha1_cephconnection.yaml similarity index 78% rename from config/samples/csi_v1alpha1_cephcluster.yaml rename to config/samples/csi_v1alpha1_cephconnection.yaml index a8f35765..265ed7cb 100644 --- a/config/samples/csi_v1alpha1_cephcluster.yaml +++ b/config/samples/csi_v1alpha1_cephconnection.yaml @@ -1,9 +1,9 @@ apiVersion: csi.ceph.io/v1alpha1 -kind: CephCluster +kind: CephConnection metadata: labels: app.kubernetes.io/name: ceph-csi-operator app.kubernetes.io/managed-by: kustomize - name: cephcluster-sample + name: cephconnection-sample spec: # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 0b020933..cf8953b5 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -2,6 +2,6 @@ resources: - csi_v1alpha1_driver.yaml - csi_v1alpha1_operatorconfig.yaml -- csi_v1alpha1_cephcluster.yaml +- csi_v1alpha1_cephconnection.yaml - csi_v1alpha1_clientprofile.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/test/scripts/collect-logs.sh b/test/scripts/collect-logs.sh index b6f8984f..eba3db34 100755 --- a/test/scripts/collect-logs.sh +++ b/test/scripts/collect-logs.sh @@ -49,7 +49,7 @@ for NAMESPACE in "${NAMESPACES[@]}"; do # they aren't captured by 'kubectl get all' for CRD in $(kubectl get crds -o jsonpath='{.items[*].metadata.name}'); do for resource in $(kubectl -n "$NAMESPACE" get "$CRD" -o jsonpath='{.items[*].metadata.name}'); do - crd_main_type="${CRD%%.*}" # e.g., for cephclusters.ceph.rook.io, only use 'cephclusters' + crd_main_type="${CRD%%.*}" # e.g., for cephconnections.ceph.rook.io, only use 'cephconnections' kubectl -n "$NAMESPACE" get -o yaml "$CRD" "$resource" >"${NS_DIR}"/"$crd_main_type"-describe--"$resource".txt done done From 972797523e78c250df193ba7cda7dd6ea81c59b4 Mon Sep 17 00:00:00 2001 From: nb-ohad Date: Sun, 28 Jul 2024 16:01:38 +0300 Subject: [PATCH 3/5] Add kubebuilder validatoin rules to drivers.csi.ceph.io API Signed-off-by: nb-ohad --- api/v1alpha1/driver_types.go | 145 ++++++++++++------ api/v1alpha1/zz_generated.deepcopy.go | 5 + config/crd/bases/csi.ceph.io_drivers.yaml | 29 ++-- .../bases/csi.ceph.io_operatorconfigs.yaml | 18 ++- 4 files changed, 135 insertions(+), 62 deletions(-) diff --git a/api/v1alpha1/driver_types.go b/api/v1alpha1/driver_types.go index 2ad843e8..fee4da01 100644 --- a/api/v1alpha1/driver_types.go +++ b/api/v1alpha1/driver_types.go @@ -35,20 +35,20 @@ const ( type LogRotationSpec struct { // MaxFiles is the number of logrtoate files - // +optional + //+kubebuilder:validation:Optional MaxFiles int `json:"maxFiles,omitempty"` // MaxLogSize is the maximum size of the log file per csi pods - // +optional + //+kubebuilder:validation:Optional MaxLogSize resource.Quantity `json:"maxLogSize,omitempty"` // Periodicity is the periodicity of the log rotation. 
- // +kubebuilder:validation:Enum=hourly;daily;weekly;monthly - // +optional + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Enum:=hourly;daily;weekly;monthly Periodicity PeriodicityType `json:"periodicity,omitempty"` // LogHostPath is the prefix directory path for the csi log files - // +optional + //+kubebuilder:validation:Optional LogHostPath string `json:"logHostPath,omitempty"` } @@ -56,10 +56,13 @@ type LogSpec struct { // Log level for driver pods, // Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. // Default to 0 + //+kubebuilder:validation:Minimum=0 + //+kubebuilder:validation:Maximum=5 + //+kubebuilder:validation:Optional LogLevel int `json:"logLevel,omitempty"` // log rotation for csi pods - // +optional + //+kubebuilder:validation:Optional Rotation *LogRotationSpec `json:"rotation,omitempty"` } @@ -81,46 +84,69 @@ const ( ) type EncryptionSpec struct { + //+kubebuilder:validation:Required ConfigMapRef corev1.LocalObjectReference `json:"configMapName,omitempty"` } type VolumeSpec struct { - Volume corev1.Volume `json:"volume,omitempty"` - Mount corev1.VolumeMount `json:"mount,omitempty"` + //+kubebuilder:validation:Required + Volume corev1.Volume `json:"volume,omitempty"` + + //+kubebuilder:validation:Required + Mount corev1.VolumeMount `json:"mount,omitempty"` } type PodCommonSpec struct { // Service account name to be used for driver's pods + //+kubebuilder:validation:Optional ServiceAccountName *string `json:"serviceAccountName,omitempty"` // Pod's user defined priority class name + //+kubebuilder:validation:Optional PrioritylClassName *string `json:"priorityClassName,omitempty"` // Pod's labels + //+kubebuilder:validation:Optional Labels map[string]string `json:"labels,omitempty"` // Pod's annotations + //+kubebuilder:validation:Optional Annotations map[string]string `json:"annotations,omitempty"` // Pod's affinity settings + //+kubebuilder:validation:Optional Affinity *corev1.Affinity `json:"affinity,omitempty"` // Pod's tolerations list + //+kubebuilder:validation:Optional + //+kubebuilder:validation:minItems:=1 Tolerations []corev1.Toleration `json:"tolerations,omitempty"` // Volume and volume mount definitions to attach to the pod + //+kubebuilder:validation:Optional + //+kubebuilder:validation:minItems:=1 Volumes []VolumeSpec `json:"volumes,omitempty"` // To indicate the image pull policy to be applied to all the containers in the csi driver pods. 
+ //+kubebuilder:validation:Optional ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy"` } type NodePluginResourcesSpec struct { - Registrar *corev1.ResourceRequirements `json:"registrar,omitempty"` - Liveness *corev1.ResourceRequirements `json:"liveness,omitempty"` - Addons *corev1.ResourceRequirements `json:"addons,omitempty"` + //+kubebuilder:validation:Optional + Registrar *corev1.ResourceRequirements `json:"registrar,omitempty"` + + //+kubebuilder:validation:Optional + Liveness *corev1.ResourceRequirements `json:"liveness,omitempty"` + + //+kubebuilder:validation:Optional + Addons *corev1.ResourceRequirements `json:"addons,omitempty"` + + //+kubebuilder:validation:Optional LogRotator *corev1.ResourceRequirements `json:"logRotator,omitempty"` - Plugin *corev1.ResourceRequirements `json:"plugin,omitempty"` + + //+kubebuilder:validation:Optional + Plugin *corev1.ResourceRequirements `json:"plugin,omitempty"` } type NodePluginSpec struct { @@ -129,28 +155,49 @@ type NodePluginSpec struct { // Driver's plugin daemonset update strategy, supported values are OnDelete and RollingUpdate. // Default value is RollingUpdate with MaxAvailabile set to 1 + //+kubebuilder:validation:Optional UpdateStrategy *appsv1.DaemonSetUpdateStrategy `json:"updateStrategy,omitempty"` // Resource requirements for plugin's containers + //+kubebuilder:validation:Optional Resources NodePluginResourcesSpec `json:"resources,omitempty"` // kubelet directory path, if kubelet configured to use other than /var/lib/kubelet path. + //+kubebuilder:validation:Optional KubeletDirPath string `json:"kubeletDirPath,omitempty"` // Control the host mount of /etc/selinux for csi plugin pods. Defaults to false + //+kubebuilder:validation:Optional EnableSeLinuxHostMount *bool `json:"EnableSeLinuxHostMount,omitempty"` } type ControllerPluginResourcesSpec struct { - Attacher *corev1.ResourceRequirements `json:"attacher,omitempty"` - Snapshotter *corev1.ResourceRequirements `json:"snapshotter,omitempty"` - Resizer *corev1.ResourceRequirements `json:"resizer,omitempty"` - Provisioner *corev1.ResourceRequirements `json:"provisioner,omitempty"` + //+kubebuilder:validation:Optional + Attacher *corev1.ResourceRequirements `json:"attacher,omitempty"` + + //+kubebuilder:validation:Optional + Snapshotter *corev1.ResourceRequirements `json:"snapshotter,omitempty"` + + //+kubebuilder:validation:Optional + Resizer *corev1.ResourceRequirements `json:"resizer,omitempty"` + + //+kubebuilder:validation:Optional + Provisioner *corev1.ResourceRequirements `json:"provisioner,omitempty"` + + //+kubebuilder:validation:Optional OMapGenerator *corev1.ResourceRequirements `json:"omapGenerator,omitempty"` - Liveness *corev1.ResourceRequirements `json:"liveness,omitempty"` - Addons *corev1.ResourceRequirements `json:"addons,omitempty"` - LogRotator *corev1.ResourceRequirements `json:"logRotator,omitempty"` - Plugin *corev1.ResourceRequirements `json:"plugin,omitempty"` + + //+kubebuilder:validation:Optional + Liveness *corev1.ResourceRequirements `json:"liveness,omitempty"` + + //+kubebuilder:validation:Optional + Addons *corev1.ResourceRequirements `json:"addons,omitempty"` + + //+kubebuilder:validation:Optional + LogRotator *corev1.ResourceRequirements `json:"logRotator,omitempty"` + + //+kubebuilder:validation:Optional + Plugin *corev1.ResourceRequirements `json:"plugin,omitempty"` } type ControllerPluginSpec struct { @@ -158,33 +205,46 @@ type ControllerPluginSpec struct { PodCommonSpec `json:"inline"` // Set replicas for controller plugin's 
deployment. Defaults to 2 + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=1 Replicas *int32 `json:"replicas,omitempty"` // Resource requirements for controller plugin's containers + //+kubebuilder:validation:Optional Resources ControllerPluginResourcesSpec `json:"resources,omitempty"` // To enable logrotation for csi pods, // Some platforms require controller plugin to run privileged, // For example, OpenShift with SELinux restrictions requires the pod to be privileged to write to hostPath. - Privileged bool `json:"privileged,omitempty"` + //+kubebuilder:validation:Optional + Privileged *bool `json:"privileged,omitempty"` } type LivenessSpec struct { // Port to expose liveness metrics + //+kubebuilder:validation:Required + //+kubebuilder:validation:Minimum:=1024 + //+kubebuilder:validation:Maximum:=65535 MetricsPort int `json:"metricsPort,omitempty"` } type LeaderElectionSpec struct { // Duration in seconds that non-leader candidates will wait to force acquire leadership. // Default to 137 seconds. + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=0 LeaseDuration int `json:"leaseDuration,omitempty"` // Deadline in seconds that the acting leader will retry refreshing leadership before giving up. // Defaults to 107 seconds. + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=0 RenewDeadline int `json:"renewDeadline,omitempty"` // Retry Period in seconds the LeaderElector clients should wait between tries of actions. // Defaults to 26 seconds. + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=0 RetryPeriod int `json:"retryPeriod,omitempty"` } @@ -201,44 +261,57 @@ const ( // DriverSpec defines the desired state of Driver type DriverSpec struct { // Logging configuration for driver's pods + //+kubebuilder:validation:Optional Log *LogSpec `json:"log,omitempty"` // A reference to a ConfigMap resource holding image overwrite for deployed // containers + //+kubebuilder:validation:Optional ImageSet *corev1.LocalObjectReference `json:"imageSet,omitempty"` // Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This will be useful in cases // when two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster. + //+kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty"` // Set to true to enable adding volume metadata on the CephFS subvolumes and RBD images. // Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images. // Hence enable metadata is false by default. + //+kubebuilder:validation:Optional EnableMetadata *bool `json:"enableMetadata,omitempty"` // Set the gRPC timeout for gRPC call issued by the driver components + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum:=0 GRpcTimeout int `json:"grpcTimeout,omitempty"` // Select a policy for snapshot behavior: none, autodetect, snapshot, sanpshotGroup + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Enum:=none|autodetect|volumegroupsnapshot|volumesnapshot SnapshotPolicy SnapshotPolicyType `json:"snapshotPolicy,omitempty"` // OMAP generator will generate the omap mapping between the PV name and the RBD image. // Need to be enabled when we are using rbd mirroring feature. // By default OMAP generator sidecar is deployed with Csi controller plugin pod, to disable // it set it to false. 
+ //+kubebuilder:validation:Optional GenerateOMapInfo *bool `json:"generateOMapInfo,omitempty"` // Policy for modifying a volume's ownership or permissions when the PVC is being mounted. // supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html + //+kubebuilder:validation:Optional FsGroupPolicy storagev1.FSGroupPolicy `json:"fsGroupPolicy,omitempty"` // Driver's encryption settings + //+kubebuilder:validation:Optional Encryption *EncryptionSpec `json:"encryption,omitempty"` // Driver's plugin configuration + //+kubebuilder:validation:Optional NodePlugin *NodePluginSpec `json:"nodePlugin,omitempty"` // Driver's controller plugin configuration + //+kubebuilder:validation:Optional ControllerPlugin *ControllerPluginSpec `json:"controllerPlugin,omitempty"` // Whether to skip any attach operation altogether for CephCsi PVCs. @@ -248,57 +321,43 @@ type DriverSpec struct { // csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set to false // since we'll have no VolumeAttachments to determine which node the PVC is mounted on. // Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details. + //+kubebuilder:validation:Optional AttachRequired *bool `json:"attachRequired,omitempty"` // Liveness metrics configuration. // disabled by default. + //+kubebuilder:validation:Optional Liveness *LivenessSpec `json:"liveness,omitempty"` // Leader election setting + //+kubebuilder:validation:Optional LeaderElection *LeaderElectionSpec `json:"leaderElection,omitempty"` // TODO: do we want Csi addon specific field? or should we generalize to // a list of additional sidecars? + //+kubebuilder:validation:Optional DeployCsiAddons *bool `json:"deployCsiAddons,omitempty"` // Select between between cephfs kernel driver and ceph-fuse // If you select a non-kernel client, your application may be disrupted during upgrade. // See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html // NOTE! cephfs quota is not supported in kernel version < 4.17 + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Enum:=autodetect;kernel CephFsClientType CephFsClientType `json:"cephFsClientType,omitempty"` // Set mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options // Set to "ms_mode=secure" when connections.encrypted is enabled in Ceph + //+kubebuilder:validation:Optional KernelMountOptions map[string]string `json:"kernelMountOptions,omitempty"` // Set mount options to use when using the Fuse client + //+kubebuilder:validation:Optional FuseMountOptions map[string]string `json:"fuseMountOptions,omitempty"` } -type DriverPhaseType string - -const ( - ReadyDriverPhase DriverPhaseType = "Ready" -) - -type DriverReasonType string - -// TODO: Add failure reason codes -const () - // DriverStatus defines the observed state of Driver type DriverStatus struct { - // TODO: Consider to move away from a single phase to a conditions based approach - // or the a Ready list approach. Main reason this reconciler address multiple - - // The last known state of the latest reconcile - Phase DriverPhaseType `json:"phase,omitempty"` - - // The reason for the last transition change. - Reason DriverReasonType `json:"reason,omitempty"` - - // A human readable message indicating details about the last transition. 
- Message string `json:"message,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index b2881b1b..324c1d3a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -311,6 +311,11 @@ func (in *ControllerPluginSpec) DeepCopyInto(out *ControllerPluginSpec) { **out = **in } in.Resources.DeepCopyInto(&out.Resources) + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerPluginSpec. diff --git a/config/crd/bases/csi.ceph.io_drivers.yaml b/config/crd/bases/csi.ceph.io_drivers.yaml index cce3a44c..e2aff468 100644 --- a/config/crd/bases/csi.ceph.io_drivers.yaml +++ b/config/crd/bases/csi.ceph.io_drivers.yaml @@ -55,6 +55,9 @@ spec: If you select a non-kernel client, your application may be disrupted during upgrade. See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html NOTE! cephfs quota is not supported in kernel version < 4.17 + enum: + - autodetect + - kernel type: string clusterName: description: |- @@ -2920,8 +2923,6 @@ spec: type: object type: object type: array - required: - - imagePullPolicy type: object privileged: description: |- @@ -2933,6 +2934,7 @@ spec: description: Set replicas for controller plugin's deployment. Defaults to 2 format: int32 + minimum: 1 type: integer resources: description: Resource requirements for controller plugin's containers @@ -3498,6 +3500,7 @@ spec: grpcTimeout: description: Set the gRPC timeout for gRPC call issued by the driver components + minimum: 0 type: integer imageSet: description: |- @@ -3531,16 +3534,19 @@ spec: description: |- Duration in seconds that non-leader candidates will wait to force acquire leadership. Default to 137 seconds. + minimum: 0 type: integer renewDeadline: description: |- Deadline in seconds that the acting leader will retry refreshing leadership before giving up. Defaults to 107 seconds. + minimum: 0 type: integer retryPeriod: description: |- Retry Period in seconds the LeaderElector clients should wait between tries of actions. Defaults to 26 seconds. + minimum: 0 type: integer type: object liveness: @@ -3550,6 +3556,8 @@ spec: properties: metricsPort: description: Port to expose liveness metrics + maximum: 65535 + minimum: 1024 type: integer type: object log: @@ -3560,6 +3568,8 @@ spec: Log level for driver pods, Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. Default to 0 + maximum: 5 + minimum: 0 type: integer rotation: description: log rotation for csi pods @@ -6452,8 +6462,6 @@ spec: type: object type: object type: array - required: - - imagePullPolicy type: object kubeletDirPath: description: kubelet directory path, if kubelet configured to @@ -6812,21 +6820,12 @@ spec: snapshotPolicy: description: 'Select a policy for snapshot behavior: none, autodetect, snapshot, sanpshotGroup' + enum: + - none|autodetect|volumegroupsnapshot|volumesnapshot type: string type: object status: description: DriverStatus defines the observed state of Driver - properties: - message: - description: A human readable message indicating details about the - last transition. - type: string - phase: - description: The last known state of the latest reconcile - type: string - reason: - description: The reason for the last transition change. 
- type: string type: object type: object served: true diff --git a/config/crd/bases/csi.ceph.io_operatorconfigs.yaml b/config/crd/bases/csi.ceph.io_operatorconfigs.yaml index dfca8de0..9d480911 100644 --- a/config/crd/bases/csi.ceph.io_operatorconfigs.yaml +++ b/config/crd/bases/csi.ceph.io_operatorconfigs.yaml @@ -59,6 +59,9 @@ spec: If you select a non-kernel client, your application may be disrupted during upgrade. See the upgrade guide: https://rook.io/docs/rook/latest/ceph-upgrade.html NOTE! cephfs quota is not supported in kernel version < 4.17 + enum: + - autodetect + - kernel type: string clusterName: description: |- @@ -2957,8 +2960,6 @@ spec: type: object type: object type: array - required: - - imagePullPolicy type: object privileged: description: |- @@ -2970,6 +2971,7 @@ spec: description: Set replicas for controller plugin's deployment. Defaults to 2 format: int32 + minimum: 1 type: integer resources: description: Resource requirements for controller plugin's @@ -3545,6 +3547,7 @@ spec: grpcTimeout: description: Set the gRPC timeout for gRPC call issued by the driver components + minimum: 0 type: integer imageSet: description: |- @@ -3578,16 +3581,19 @@ spec: description: |- Duration in seconds that non-leader candidates will wait to force acquire leadership. Default to 137 seconds. + minimum: 0 type: integer renewDeadline: description: |- Deadline in seconds that the acting leader will retry refreshing leadership before giving up. Defaults to 107 seconds. + minimum: 0 type: integer retryPeriod: description: |- Retry Period in seconds the LeaderElector clients should wait between tries of actions. Defaults to 26 seconds. + minimum: 0 type: integer type: object liveness: @@ -3597,6 +3603,8 @@ spec: properties: metricsPort: description: Port to expose liveness metrics + maximum: 65535 + minimum: 1024 type: integer type: object log: @@ -3607,6 +3615,8 @@ spec: Log level for driver pods, Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity. 
Default to 0 + maximum: 5 + minimum: 0 type: integer rotation: description: log rotation for csi pods @@ -6533,8 +6543,6 @@ spec: type: object type: object type: array - required: - - imagePullPolicy type: object kubeletDirPath: description: kubelet directory path, if kubelet configured @@ -6898,6 +6906,8 @@ spec: snapshotPolicy: description: 'Select a policy for snapshot behavior: none, autodetect, snapshot, sanpshotGroup' + enum: + - none|autodetect|volumegroupsnapshot|volumesnapshot type: string type: object logLevel: From 57430d81cdacd9d85eb61ad0711c10f6b46efa20 Mon Sep 17 00:00:00 2001 From: nb-ohad Date: Sun, 28 Jul 2024 18:54:44 +0300 Subject: [PATCH 4/5] Add kubebuilder validation rules for operatorconfigs.csi.ceph.com Signed-off-by: nb-ohad --- api/v1alpha1/operatorconfig_types.go | 4 ++++ config/crd/bases/csi.ceph.io_operatorconfigs.yaml | 2 ++ 2 files changed, 6 insertions(+) diff --git a/api/v1alpha1/operatorconfig_types.go b/api/v1alpha1/operatorconfig_types.go index 46d57916..c3d24603 100644 --- a/api/v1alpha1/operatorconfig_types.go +++ b/api/v1alpha1/operatorconfig_types.go @@ -23,9 +23,13 @@ import ( // OperatorConfigSpec defines the desired state of OperatorConfig type OperatorConfigSpec struct { // Operator's log level + //+kubebuilder:validation:Optional + //+kubebuilder:validation:Minimum=0 + //+kubebuilder:validation:Maximum=3 LogLevel int `json:"logLevel,omitempty"` // Allow overwrite of hardcoded defaults for any driver managed by this operator + //+kubebuilder:validation:Optional DriverSpecDefaults *DriverSpec `json:"driverSpecDefaults,omitempty"` } diff --git a/config/crd/bases/csi.ceph.io_operatorconfigs.yaml b/config/crd/bases/csi.ceph.io_operatorconfigs.yaml index 9d480911..1207fa40 100644 --- a/config/crd/bases/csi.ceph.io_operatorconfigs.yaml +++ b/config/crd/bases/csi.ceph.io_operatorconfigs.yaml @@ -6912,6 +6912,8 @@ spec: type: object logLevel: description: Operator's log level + maximum: 3 + minimum: 0 type: integer type: object status: From 69af8e6bcfab48db87a11a0208d4be2ceab1a63e Mon Sep 17 00:00:00 2001 From: nb-ohad Date: Sun, 28 Jul 2024 19:15:21 +0300 Subject: [PATCH 5/5] Update operator design doc to align with current changes and renames Signed-off-by: nb-ohad --- docs/design/operator.md | 87 ++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 48 deletions(-) diff --git a/docs/design/operator.md b/docs/design/operator.md index 8e12f0d0..df674281 100644 --- a/docs/design/operator.md +++ b/docs/design/operator.md @@ -63,7 +63,7 @@ In this diagram: ## CRDs for ceph-csi-operator -### CephCSIOperatorConfig CRD +### OperatorConfig CRD Manages operator-level configurations and offers a place to overwrite settings for CSI drivers. 
This CRD is a namespace-scoped CRD and a single CR named @@ -77,7 +77,7 @@ The configurations are categorized into 2 different types ```yaml --- -kind: CephCSIOperatorConfig +kind: OperatorConfig apiVersion: csi.ceph.io/v1alpha1 metadata: name: ceph-csi-operator-config @@ -87,12 +87,12 @@ spec: driverSpecDefaults: log: logLevel: 5 - logRotation: - # one of: hourly, daily, weekly, monthly - periodicity: daily - maxLogSize: 500M - maxFiles: 5 - logHostPath: /var/lib/cephcsi + rotation: + # one of: hourly, daily, weekly, monthly + periodicity: daily + maxLogSize: 500M + maxFiles: 5 + logHostPath: /var/lib/cephcsi clusterName: 5c63ad7e-74fe-4724-a511-4ccdc560da56 enableMetadata: true grpcTimeout: 100 @@ -102,7 +102,7 @@ spec: encryption: configMapRef: name: encryption-config-map-name - plugin: + nodePlugin: priorityClassName: system-node-critical updateStrategy: type: RollingUpdate @@ -159,14 +159,14 @@ spec: mountPropagation: Bidirectional kubeletDirPath: "/var/lib/kubelet" imagePullPolicy: IfNotPresent - provisioner: + controllerPlugin: priorityClassName: system-cluster-critical labels: - app: provisioner + app: cephfs-ctrlplugin privileged: true annotations: k8s.v1.cni.cncf.io/networks: macvlan-conf-1 - provisionerReplicas: 2 + replicas: 2 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -243,7 +243,7 @@ status: reason: operator config successfully created ``` -### CephCSIDriver CRD +### Driver CRD Manages the installation, lifecycle management, and configuration for CephFS, RBD, and NFS CSI drivers within namespaces. @@ -257,7 +257,7 @@ RBD, and NFS CSI drivers within namespaces. ```yaml --- -kind: CephCSIDriver +kind: Driver apiVersion: csi.ceph.io/v1alpha1 metadata: name: "..csi.ceph.com" @@ -267,25 +267,25 @@ spec: encryption: configMapRef: name: encryption-config-map-name - plugin: + nodePlugin: priorityClassName: system-node-critical updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: 1 labels: - app: cephfs-plugin + app: cephfs-nodeplugin annotations: k8s.v1.cni.cncf.io/networks: macvlan-conf-1 logRotator: cpu: "100m" memory: "32Mi" - provisioner: + controllerPlugin: labels: - app: ceph-fs-provisioner + app: cephfs-ctrlplugin annotations: k8s.v1.cni.cncf.io/networks: macvlan-conf-1 - provisionerReplicas: 2 + replicas: 2 leaderElection: leaseDuration: 100 renewDeadline: 100 @@ -299,19 +299,17 @@ spec: deployCSIAddons: false kernelClient: true kernelMountOptions: ms_mode=secure -status: - phase: Failed - reason: csi driver with same name already exists in the cluster +status: {} ``` -### CephCSICephCluster CRD +### CephConnection CRD Stores connection and configuration details for a single Ceph cluster and provide the information to be used by multiple CSI drivers. ```yaml --- -kind: CephCSICephCluster +kind: CephConnection apiVersion: csi.ceph.io/v1alpha1 metadata: name: ceph-cluster-1 @@ -327,30 +325,24 @@ spec: - topology.kubernetes.io/region - topology.kubernetes.io/zone rbdMirrorDaemonCount: 2 - cephConfig: - global: - auth_cluster_required: none - auth_service_required: none - auth_client_required: none - rbd_validate_pool: false status: {} ``` -### CephCSIConfig CRD +### ClientProfile CRD Contains details about CephFS, RBD, and NFS configuration to be used when -communicating with Ceph. Include a reference to a CephCSICephCluster holding +communicating with Ceph. Include a reference to a CephConnection holding the connection information for the target Ceph cluster. 
```yaml --- -kind: CephCSIConfig +kind: ClientProfile apiVersion: csi.ceph.io/v1alpha1 metadata: name: storage namespace: spec: - cephClusterRef: + cephConnectionRef: name: ceph-cluster-1 cephFS: subvolumeGroup: csi @@ -358,22 +350,20 @@ spec: fuseMountOptions: debug rbd: radosNamespace: rados-test -status: - phase: Succeeded - reason: successfully linked to CephClusterRef +status: {} ``` -### CephCSIConfigMapping +### ClientProfileMapping -The CephCSIConfigMapping CR contains a mapping between local and remote Ceph -cluster configurations.It also provides a mapping between the -CephCSIConfigurations. The information in this CR helps Ceph-CSI identify -peered blockpools. This can help with the correct management and consumption of -volumes in backup and DR scenarios +The ClientProfileMapping CR contains a mapping between pairs of Ceph +CSI client profiles. The information in this CR helps Ceph CSI identify +peered configuration between different k8s cluster. This information is +utilized in the management and consumption of volumes in backup and +DR scenarios ```yaml --- -kind: CephCSIConfigMapping +kind: ClientProfileMapping apiVersion: csi.ceph.io/v1 metadata: name: storage @@ -381,19 +371,20 @@ metadata: spec: blockPoolMapping: - local: - cephCSIConfigName: remote1-cephCSICluster-name + clientPorfileName: local-clientprofile-name poolID: 2 remote: - cephCSIConfigName: remote1-cephCSICluster-name + clientProfileName: remote-clientprofile-name poolID: 2 - local: - cephCSIConfigName: remote1-cephCSICluster-name + clientProfileName: another-local-clientprofile-name poolID: 2 remote: - cephCSIConfigName: remote2-cephCSICluster-name + clientProfileName: different-remote-clientprofile-name poolID: 3 ``` By following this design document, the Ceph CSI Operator can be effectively implemented, providing automated and scalable management of Ceph CSI drivers within Kubernetes clusters. +
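The renamed APIs from patches 1 and 2 are consumed together: a ClientProfile points to a CephConnection through the new cephConnectionRef field. The Go sketch below is illustrative only and is not part of the series; the object names and the "ceph-csi" namespace are hypothetical, and it assumes the api/v1alpha1 package as it stands after these patches.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	csiv1alpha1 "github.com/ceph/ceph-csi-operator/api/v1alpha1"
)

func main() {
	// A CephConnection carries the Ceph cluster connection details.
	// Monitors is required (MinItems=1) under the new validation markers.
	conn := csiv1alpha1.CephConnection{
		ObjectMeta: metav1.ObjectMeta{Name: "ceph-cluster-1", Namespace: "ceph-csi"},
		Spec: csiv1alpha1.CephConnectionSpec{
			Monitors: []string{"10.0.0.1:6789"},
		},
	}

	// A ClientProfile references that connection via the renamed
	// cephConnectionRef field and holds per-driver settings.
	profile := csiv1alpha1.ClientProfile{
		ObjectMeta: metav1.ObjectMeta{Name: "storage", Namespace: "ceph-csi"},
		Spec: csiv1alpha1.ClientProfileSpec{
			CephConnectionRef: corev1.LocalObjectReference{Name: conn.Name},
			CephFs:            &csiv1alpha1.CephFsConfigSpec{SubVolumeGroup: "csi"},
			Rbd:               &csiv1alpha1.RbdConfigSpec{RadosNamespace: "rados-test"},
		},
	}

	fmt.Println(profile.Spec.CephConnectionRef.Name)
}
```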
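Patches 3 and 4 rely on controller-gen translating the +kubebuilder markers into the OpenAPI constraints that appear in the regenerated CRD YAML under config/crd/bases. Below is a minimal sketch of that mapping using an illustrative struct rather than one taken from the patches; regeneration is assumed to go through the project's controller-gen tooling (for example `make manifests` in a standard kubebuilder layout).

```go
// Illustrative only, not part of the patch series. controller-gen reads the
// +kubebuilder markers on struct fields and emits matching OpenAPI constraints
// into the generated CRD schema.
package v1alpha1

// ExampleSpec shows the marker style used throughout patches 3 and 4.
type ExampleSpec struct {
	// Rendered as minimum: 0 and maximum: 5; the field stays optional.
	//+kubebuilder:validation:Optional
	//+kubebuilder:validation:Minimum=0
	//+kubebuilder:validation:Maximum=5
	LogLevel int `json:"logLevel,omitempty"`

	// Rendered as an enum with the listed values; allowed values are
	// separated by semicolons in the marker.
	//+kubebuilder:validation:Optional
	//+kubebuilder:validation:Enum=hourly;daily;weekly;monthly
	Periodicity string `json:"periodicity,omitempty"`

	// Required fields are listed under the schema's required: section,
	// and MinItems becomes minItems on the array property.
	//+kubebuilder:validation:Required
	//+kubebuilder:validation:MinItems=1
	Monitors []string `json:"monitors"`
}
```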