From 77067d087b0952acb8d88846a25dafac06672f16 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 11:30:34 +0100 Subject: [PATCH 01/27] Implement configuration merge --- rust/crd/src/lib.rs | 273 +++++++++++-------- rust/crd/src/resource.rs | 39 +-- rust/operator-binary/src/druid_controller.rs | 73 ++--- 3 files changed, 206 insertions(+), 179 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 946d4ecc..450b0cea 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -31,10 +31,13 @@ use stackable_operator::{ labels::ObjectLabels, product_config::types::PropertyNameKind, product_config_utils::{ConfigError, Configuration}, - role_utils::{CommonConfiguration, Role, RoleGroup}, + role_utils::{CommonConfiguration, Role, RoleGroup, RoleGroupRef}, schemars::{self, JsonSchema}, }; -use std::collections::{BTreeMap, HashMap}; +use std::{ + collections::{BTreeMap, HashMap}, + str::FromStr, +}; use strum::{Display, EnumDiscriminants, EnumIter, EnumString, IntoStaticStr}; use tls::default_druid_tls; @@ -125,8 +128,8 @@ pub enum Error { }, #[snafu(display("2 differing s3 connections were given, this is unsupported by Druid"))] IncompatibleS3Connections, - #[snafu(display("the role group {rolegroup_name} is not defined"))] - CannotRetrieveRoleGroup { rolegroup_name: String }, + #[snafu(display("Unknown Druid role found {role}. 
Should be one of {roles:?}"))] + UnknownDruidRole { role: String, roles: Vec }, #[snafu(display("missing namespace for resource {name}"))] MissingNamespace { name: String }, #[snafu(display("fragment validation failure"))] @@ -194,100 +197,76 @@ pub struct DruidClusterConfig { pub zookeeper_config_map_name: String, } -/// Common configuration for all role groups -pub struct CommonRoleGroupConfig { - pub resources: RoleResource, - pub replicas: Option, - pub selector: Option, -} - -/// Container for the merged and validated role group configurations -/// -/// This structure contains for every role a map from the role group names to their configurations. -/// The role group configurations are merged with the role and default configurations. The product -/// configuration is not applied. pub struct MergedConfig { - /// Merged configuration of the broker role - pub brokers: HashMap>, - /// Merged configuration of the coordinator role - pub coordinators: HashMap>, - /// Merged configuration of the historical role - pub historicals: HashMap>, - /// Merged configuration of the middle manager role - pub middle_managers: HashMap>, - /// Merged configuration of the router role - pub routers: HashMap>, + pub brokers: HashMap, + pub coordinators: HashMap, + pub historicals: HashMap, + pub middle_managers: HashMap, + pub routers: HashMap, } impl MergedConfig { - /// Returns the common configuration for the given role and rolegroup name - pub fn common_config( - &self, - role: DruidRole, - rolegroup_name: &str, - ) -> Result { + pub fn resources(&self, role: DruidRole, role_group: &str) -> RoleResource { + self.common_config(role, role_group).resources + } + + pub fn common_config(&self, role: DruidRole, role_group: &str) -> CommonConfig { match role { DruidRole::Broker => { - let rolegroup = self + let config = self .brokers - .get(rolegroup_name) - .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonRoleGroupConfig { - resources: 
RoleResource::Druid(rolegroup.config.config.resources.to_owned()), - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) + .get(role_group) + .cloned() + // TODO default? + .unwrap_or_default(); + CommonConfig { + resources: RoleResource::Druid(config.resources), + } } DruidRole::Coordinator => { - let rolegroup = self + let config = self .coordinators - .get(rolegroup_name) - .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonRoleGroupConfig { - resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) + .get(role_group) + .cloned() + .unwrap_or_default(); + CommonConfig { + resources: RoleResource::Druid(config.resources), + } } DruidRole::Historical => { - let rolegroup = self + let config = self .historicals - .get(rolegroup_name) - .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonRoleGroupConfig { - resources: RoleResource::Historical( - rolegroup.config.config.resources.to_owned(), - ), - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) + .get(role_group) + .cloned() + .unwrap_or_default(); + CommonConfig { + resources: RoleResource::Historical(config.resources), + } } DruidRole::MiddleManager => { - let rolegroup = self + let config = self .middle_managers - .get(rolegroup_name) - .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonRoleGroupConfig { - resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) + .get(role_group) + .cloned() + .unwrap_or_default(); + CommonConfig { + resources: RoleResource::Druid(config.resources), + } } DruidRole::Router => { - let rolegroup = self - .routers - .get(rolegroup_name) - .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonRoleGroupConfig { - resources: 
RoleResource::Druid(rolegroup.config.config.resources.to_owned()), - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) + let config = self.routers.get(role_group).cloned().unwrap_or_default(); + CommonConfig { + resources: RoleResource::Druid(config.resources), + } } } } } +pub struct CommonConfig { + pub resources: RoleResource, +} + #[derive( Clone, Debug, @@ -469,6 +448,86 @@ impl DruidCluster { Ok(result) } + /// Takes a rolegoup_ref (with role and role group name) and returns the selector defined for + /// that role group. + pub fn node_selector( + &self, + rolegroup_ref: &RoleGroupRef, + ) -> Option { + match DruidRole::from_str(rolegroup_ref.role.as_str()).unwrap() { + DruidRole::Broker => self + .spec + .brokers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.selector.clone()), + DruidRole::MiddleManager => self + .spec + .middle_managers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.selector.clone()), + DruidRole::Coordinator => self + .spec + .coordinators + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.selector.clone()), + DruidRole::Historical => self + .spec + .historicals + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.selector.clone()), + DruidRole::Router => self + .spec + .routers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.selector.clone()), + } + } + + pub fn replicas(&self, rolegroup_ref: &RoleGroupRef) -> Option { + match DruidRole::from_str(rolegroup_ref.role.as_str()).unwrap() { + DruidRole::Broker => self + .spec + .brokers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.replicas) + .map(i32::from), + DruidRole::MiddleManager => self + .spec + .middle_managers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.replicas) + .map(i32::from), + DruidRole::Coordinator => self + .spec + .coordinators + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| 
rg.replicas) + .map(i32::from), + DruidRole::Historical => self + .spec + .historicals + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.replicas) + .map(i32::from), + DruidRole::Router => self + .spec + .routers + .role_groups + .get(&rolegroup_ref.role_group) + .and_then(|rg| rg.replicas) + .map(i32::from), + } + } + pub fn build_role_properties( &self, ) -> HashMap< @@ -610,80 +669,64 @@ impl DruidCluster { s3_ingestion || s3_storage } - /// Returns the merged and validated configuration for all roles pub fn merged_config(&self) -> Result { Ok(MergedConfig { - brokers: DruidCluster::merged_role( + brokers: DruidCluster::merged_role_config( &self.spec.brokers, &BrokerConfig::default_config(), )?, - coordinators: DruidCluster::merged_role( + coordinators: DruidCluster::merged_role_config( &self.spec.coordinators, &CoordinatorConfig::default_config(), )?, - historicals: DruidCluster::merged_role( + historicals: DruidCluster::merged_role_config( &self.spec.historicals, &HistoricalConfig::default_config(), )?, - middle_managers: DruidCluster::merged_role( + middle_managers: DruidCluster::merged_role_config( &self.spec.middle_managers, &MiddleManagerConfig::default_config(), )?, - routers: DruidCluster::merged_role( + routers: DruidCluster::merged_role_config( &self.spec.routers, &RouterConfig::default_config(), )?, }) } - /// Merges and validates the role groups of the given role with the given default configuration - fn merged_role( + fn merged_role_config( role: &Role, default_config: &T::Fragment, - ) -> Result>, Error> + ) -> Result, Error> where T: FromFragment, T::Fragment: Clone + Merge, { let mut merged_role_config = HashMap::new(); - for (rolegroup_name, rolegroup) in &role.role_groups { - let merged_rolegroup_config = - DruidCluster::merged_rolegroup(rolegroup, &role.config.config, default_config)?; + for ( + rolegroup_name, + RoleGroup { + config: + CommonConfiguration { + config: rolegroup_config, + .. + }, + .. 
+ }, + ) in &role.role_groups + { + let merged_rolegroup_config = DruidCluster::merged_rolegroup_config( + rolegroup_config, + &role.config.config, + default_config, + )?; merged_role_config.insert(rolegroup_name.to_owned(), merged_rolegroup_config); } Ok(merged_role_config) } - /// Merges and validates the given role group with the given role and default configurations - fn merged_rolegroup( - rolegroup: &RoleGroup, - role_config: &T::Fragment, - default_config: &T::Fragment, - ) -> Result, Error> - where - T: FromFragment, - T::Fragment: Clone + Merge, - { - let merged_config = DruidCluster::merged_rolegroup_config( - &rolegroup.config.config, - role_config, - default_config, - )?; - Ok(RoleGroup { - config: CommonConfiguration { - config: merged_config, - config_overrides: rolegroup.config.config_overrides.to_owned(), - env_overrides: rolegroup.config.env_overrides.to_owned(), - cli_overrides: rolegroup.config.cli_overrides.to_owned(), - }, - replicas: rolegroup.replicas, - selector: rolegroup.selector.to_owned(), - }) - } - - /// Merges and validates the given role group, role, and default configurations pub fn merged_rolegroup_config( rolegroup_config: &T::Fragment, role_config: &T::Fragment, diff --git a/rust/crd/src/resource.rs b/rust/crd/src/resource.rs index d41f3231..7dbc1d42 100644 --- a/rust/crd/src/resource.rs +++ b/rust/crd/src/resource.rs @@ -52,8 +52,8 @@ impl RoleResource { pub fn as_memory_limits(&self) -> MemoryLimits { match self { - Self::Druid(r) => r.memory.clone(), - Self::Historical(r) => r.memory.clone(), + Self::Druid(r) => r.clone().memory, + Self::Historical(r) => r.clone().memory, } } @@ -192,7 +192,6 @@ mod test { NoRuntimeLimitsFragment, }, k8s_openapi::apimachinery::pkg::api::resource::Quantity, - role_utils::{CommonConfiguration, RoleGroup}, }; #[rstest] @@ -344,16 +343,8 @@ mod test { ); let config = cluster.merged_config().unwrap(); - if let Some(RoleGroup { - config: - CommonConfiguration { - config: - MiddleManagerConfig { - 
resources: middlemanager_resources_from_rg, - }, - .. - }, - .. + if let Some(MiddleManagerConfig { + resources: middlemanager_resources_from_rg, }) = config.middle_managers.get("resources-from-role-group") { let expected = Resources { @@ -376,16 +367,8 @@ mod test { panic!("No role group named [resources-from-role-group] found"); } - if let Some(RoleGroup { - config: - CommonConfiguration { - config: - MiddleManagerConfig { - resources: middlemanager_resources_from_rg, - }, - .. - }, - .. + if let Some(MiddleManagerConfig { + resources: middlemanager_resources_from_rg, }) = config.middle_managers.get("resources-from-role") { let expected = Resources { @@ -419,10 +402,7 @@ mod test { // ---------- default role group let config = cluster.merged_config().unwrap(); - let res = config - .common_config(DruidRole::Historical, "default") - .unwrap() - .resources; + let res = config.resources(DruidRole::Historical, "default"); let mut got = BTreeMap::new(); assert!(res.update_druid_config_file(&mut got).is_ok()); @@ -433,10 +413,7 @@ mod test { assert_eq!(value, &expected, "primary"); // ---------- secondary role group - let res = config - .common_config(DruidRole::Historical, "secondary") - .unwrap() - .resources; + let res = config.resources(DruidRole::Historical, "secondary"); let mut got = BTreeMap::new(); assert!(res.update_druid_config_file(&mut got).is_ok()); diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 6f9f886f..0022cb40 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -11,7 +11,6 @@ use crate::{ }; use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_druid_crd::build_recommended_labels; use stackable_druid_crd::{ authorization::DruidAuthorization, build_string_list, @@ -20,11 +19,14 @@ use stackable_druid_crd::{ PLACEHOLDER_LDAP_BIND_USER, }, security::{resolve_authentication_classes, DruidTlsSecurity}, - CommonRoleGroupConfig, 
DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, - AUTH_AUTHORIZER_OPA_URI, CERTS_DIR, CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, - DS_BUCKET, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, - RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, - ZOOKEEPER_CONNECTION_STRING, + DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, AUTH_AUTHORIZER_OPA_URI, CERTS_DIR, + CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, DS_BUCKET, EXTENSIONS_LOADLIST, + HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, + S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, ZOOKEEPER_CONNECTION_STRING, +}; +use stackable_druid_crd::{ + build_recommended_labels, + resource::{self, RoleResource}, }; use stackable_operator::{ builder::{ @@ -159,10 +161,19 @@ pub enum Error { source: strum::ParseError, role: String, }, + #[snafu(display("failed to resolve and merge resource config for role and role group"))] + FailedToResolveResourceConfig { + source: stackable_druid_crd::resource::Error, + }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, - #[snafu(display("invalid configuration"))] - InvalidConfiguration { source: stackable_druid_crd::Error }, + #[snafu(display("invalid java heap config - missing default or value in crd?"))] + InvalidJavaHeapConfig, + #[snafu(display("failed to convert java heap config to unit [{unit}]"))] + FailedToConvertJavaHeap { + source: stackable_operator::error::Error, + unit: String, + }, #[snafu(display("failed to create cluster resources"))] CreateClusterResources { source: stackable_operator::error::Error, @@ -282,7 +293,9 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< let druid_tls_security = DruidTlsSecurity::new_from_druid_cluster(&druid, resolved_authentication_classes); - let role_config = 
transform_all_roles_to_config(druid.as_ref(), druid.build_role_properties()); + // False positive, auto-deref breaks type inference + #[allow(clippy::explicit_auto_deref)] + let role_config = transform_all_roles_to_config(&*druid, druid.build_role_properties()); let validated_role_config = validate_all_roles_and_groups_config( &resolved_product_image.product_version, &role_config.context(ProductConfigTransformSnafu)?, @@ -300,7 +313,7 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< ) .context(CreateClusterResourcesSnafu)?; - let merged_config = druid.merged_config().context(FailedToResolveConfigSnafu)?; + let config = druid.merged_config().context(FailedToResolveConfigSnafu)?; for (role_name, role_config) in validated_role_config.iter() { let druid_role = DruidRole::from_str(role_name).context(UnidentifiedDruidRoleSnafu { @@ -329,9 +342,7 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< role_group: rolegroup_name.into(), }; - let merged_rolegroup_config = merged_config - .common_config(druid_role.clone(), rolegroup_name) - .context(FailedToResolveConfigSnafu)?; + let resources = config.resources(druid_role.clone(), rolegroup_name); let rg_service = build_rolegroup_services( &druid, @@ -344,11 +355,11 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< &resolved_product_image, &rolegroup, rolegroup_config, - &merged_rolegroup_config, &zk_connstr, opa_connstr.as_deref(), s3_conn.as_ref(), deep_storage_bucket_name.as_deref(), + &resources, &druid_tls_security, &druid_ldap_settings, )?; @@ -357,8 +368,8 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< &resolved_product_image, &rolegroup, rolegroup_config, - &merged_rolegroup_config, s3_conn.as_ref(), + &resources, &druid_tls_security, &druid_ldap_settings, )?; @@ -452,11 +463,11 @@ fn build_rolegroup_config_map( resolved_product_image: &ResolvedProductImage, rolegroup: &RoleGroupRef, rolegroup_config: &HashMap>, - merged_rolegroup_config: 
&CommonRoleGroupConfig, zk_connstr: &str, opa_connstr: Option<&str>, s3_conn: Option<&S3ConnectionSpec>, deep_storage_bucket_name: Option<&str>, + resources: &RoleResource, druid_tls_security: &DruidTlsSecurity, druid_ldap_settings: &Option, ) -> Result { @@ -472,8 +483,7 @@ fn build_rolegroup_config_map( // This has to be done here since there is no other suitable place for it. // Previously such properties were added in the compute_files() function, // but that code path is now incompatible with the design of fragment merging. - merged_rolegroup_config - .resources + resources .update_druid_config_file(&mut conf) .context(UpdateDruidConfigFromResourcesSnafu)?; // NOTE: druid.host can be set manually - if it isn't, the canonical host name of @@ -541,8 +551,7 @@ fn build_rolegroup_config_map( cm_conf_data.insert(RUNTIME_PROPS.to_string(), runtime_properties); } PropertyNameKind::File(file_name) if file_name == JVM_CONFIG => { - let (heap, direct) = merged_rolegroup_config - .resources + let (heap, direct) = resources .get_memory_sizes(&role) .context(DeriveMemorySettingsSnafu)?; let jvm_config = get_jvm_config(&role, heap, direct).context(GetJvmConfigSnafu)?; @@ -637,8 +646,8 @@ fn build_rolegroup_statefulset( resolved_product_image: &ResolvedProductImage, rolegroup_ref: &RoleGroupRef, rolegroup_config: &HashMap>, - merged_rolegroup_config: &CommonRoleGroupConfig, s3_conn: Option<&S3ConnectionSpec>, + resources: &RoleResource, druid_tls_security: &DruidTlsSecurity, ldap_settings: &Option, ) -> Result { @@ -654,7 +663,7 @@ fn build_rolegroup_statefulset( .context(FailedContainerBuilderCreationSnafu { name: APP_NAME })?; // init pod builder let mut pb = PodBuilder::new(); - pb.node_selector_opt(merged_rolegroup_config.selector.to_owned()); + pb.node_selector_opt(druid.node_selector(rolegroup_ref)); if let Some(ldap_settings) = ldap_settings { // TODO: Connecting to an LDAP server without bind credentials does not seem to be configurable in Druid at the moment @@ 
-681,9 +690,7 @@ fn build_rolegroup_statefulset( &mut cb_druid, &mut pb, ); - merged_rolegroup_config - .resources - .update_volumes_and_volume_mounts(&mut cb_druid, &mut pb); + resources.update_volumes_and_volume_mounts(&mut cb_druid, &mut pb); let prepare_container_command = druid_tls_security.build_tls_key_stores_cmd(); @@ -720,7 +727,7 @@ fn build_rolegroup_statefulset( .readiness_probe(druid_tls_security.get_tcp_socket_probe(10, 10, 1, 3)) // 10s * 3 = 30s to be restarted .liveness_probe(druid_tls_security.get_tcp_socket_probe(10, 10, 3, 3)) - .resources(merged_rolegroup_config.resources.as_resource_requirements()); + .resources(resources.as_resource_requirements()); pb.image_pull_secrets_from_product_image(resolved_product_image) .add_init_container(cb_prepare.build()) @@ -761,7 +768,7 @@ fn build_rolegroup_statefulset( replicas: if druid.spec.stopped.unwrap_or(false) { Some(0) } else { - merged_rolegroup_config.replicas.map(i32::from) + druid.replicas(rolegroup_ref) }, selector: LabelSelector { match_labels: Some(role_group_selector_labels( @@ -919,10 +926,12 @@ mod test { OperatorFramework { source: stackable_operator::error::Error, }, + #[snafu(display("resource error"))] + Resource { + source: stackable_druid_crd::resource::Error, + }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, - #[snafu(display("invalid configuration"))] - InvalidConfiguration { source: stackable_druid_crd::Error }, } #[rstest] @@ -985,9 +994,7 @@ mod test { role_group: rolegroup_name.clone(), }; - let merged_rolegroup_config = config - .common_config(DruidRole::Historical, rolegroup_name) - .context(InvalidConfigurationSnafu)?; + let resources = config.resources(DruidRole::Historical, rolegroup_name); let ldap_settings: Option = None; @@ -996,11 +1003,11 @@ mod test { &resolved_product_image, &rolegroup_ref, rolegroup_config, - &merged_rolegroup_config, "zookeeper-connection-string", None, 
None, None, + &resources, &druid_tls_security, &ldap_settings, ) From e4dd4f7591eb3f5d3a596deccaaf4e9795924fcd Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 12:56:15 +0100 Subject: [PATCH 02/27] Add replicas and selector to CommonConfig --- rust/crd/src/lib.rs | 246 +++++++------------ rust/crd/src/resource.rs | 35 ++- rust/operator-binary/src/druid_controller.rs | 49 ++-- 3 files changed, 152 insertions(+), 178 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 450b0cea..e6f946cb 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -31,13 +31,10 @@ use stackable_operator::{ labels::ObjectLabels, product_config::types::PropertyNameKind, product_config_utils::{ConfigError, Configuration}, - role_utils::{CommonConfiguration, Role, RoleGroup, RoleGroupRef}, + role_utils::{CommonConfiguration, Role, RoleGroup}, schemars::{self, JsonSchema}, }; -use std::{ - collections::{BTreeMap, HashMap}, - str::FromStr, -}; +use std::collections::{BTreeMap, HashMap}; use strum::{Display, EnumDiscriminants, EnumIter, EnumString, IntoStaticStr}; use tls::default_druid_tls; @@ -130,6 +127,8 @@ pub enum Error { IncompatibleS3Connections, #[snafu(display("Unknown Druid role found {role}. 
Should be one of {roles:?}"))] UnknownDruidRole { role: String, roles: Vec }, + #[snafu(display("the role group {rolegroup_name} is not defined"))] + CannotRetrieveRoleGroup { rolegroup_name: String }, #[snafu(display("missing namespace for resource {name}"))] MissingNamespace { name: String }, #[snafu(display("fragment validation failure"))] @@ -198,66 +197,76 @@ pub struct DruidClusterConfig { } pub struct MergedConfig { - pub brokers: HashMap, - pub coordinators: HashMap, - pub historicals: HashMap, - pub middle_managers: HashMap, - pub routers: HashMap, + pub brokers: HashMap>, + pub coordinators: HashMap>, + pub historicals: HashMap>, + pub middle_managers: HashMap>, + pub routers: HashMap>, } impl MergedConfig { - pub fn resources(&self, role: DruidRole, role_group: &str) -> RoleResource { - self.common_config(role, role_group).resources - } - - pub fn common_config(&self, role: DruidRole, role_group: &str) -> CommonConfig { + pub fn common_config( + &self, + role: DruidRole, + rolegroup_name: &str, + ) -> Result { match role { DruidRole::Broker => { - let config = self + let rolegroup = self .brokers - .get(role_group) - .cloned() - // TODO default? 
- .unwrap_or_default(); - CommonConfig { - resources: RoleResource::Druid(config.resources), - } + .get(rolegroup_name) + .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; + Ok(CommonConfig { + resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) } DruidRole::Coordinator => { - let config = self + let rolegroup = self .coordinators - .get(role_group) - .cloned() - .unwrap_or_default(); - CommonConfig { - resources: RoleResource::Druid(config.resources), - } + .get(rolegroup_name) + .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; + Ok(CommonConfig { + resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) } DruidRole::Historical => { - let config = self + let rolegroup = self .historicals - .get(role_group) - .cloned() - .unwrap_or_default(); - CommonConfig { - resources: RoleResource::Historical(config.resources), - } + .get(rolegroup_name) + .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; + Ok(CommonConfig { + resources: RoleResource::Historical( + rolegroup.config.config.resources.to_owned(), + ), + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) } DruidRole::MiddleManager => { - let config = self + let rolegroup = self .middle_managers - .get(role_group) - .cloned() - .unwrap_or_default(); - CommonConfig { - resources: RoleResource::Druid(config.resources), - } + .get(rolegroup_name) + .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; + Ok(CommonConfig { + resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) } DruidRole::Router => { - let config = self.routers.get(role_group).cloned().unwrap_or_default(); - CommonConfig { - resources: RoleResource::Druid(config.resources), - } + let 
rolegroup = self + .routers + .get(rolegroup_name) + .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; + Ok(CommonConfig { + resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) } } } @@ -265,6 +274,8 @@ impl MergedConfig { pub struct CommonConfig { pub resources: RoleResource, + pub replicas: Option, + pub selector: Option, } #[derive( @@ -448,86 +459,6 @@ impl DruidCluster { Ok(result) } - /// Takes a rolegoup_ref (with role and role group name) and returns the selector defined for - /// that role group. - pub fn node_selector( - &self, - rolegroup_ref: &RoleGroupRef, - ) -> Option { - match DruidRole::from_str(rolegroup_ref.role.as_str()).unwrap() { - DruidRole::Broker => self - .spec - .brokers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.selector.clone()), - DruidRole::MiddleManager => self - .spec - .middle_managers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.selector.clone()), - DruidRole::Coordinator => self - .spec - .coordinators - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.selector.clone()), - DruidRole::Historical => self - .spec - .historicals - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.selector.clone()), - DruidRole::Router => self - .spec - .routers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.selector.clone()), - } - } - - pub fn replicas(&self, rolegroup_ref: &RoleGroupRef) -> Option { - match DruidRole::from_str(rolegroup_ref.role.as_str()).unwrap() { - DruidRole::Broker => self - .spec - .brokers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.replicas) - .map(i32::from), - DruidRole::MiddleManager => self - .spec - .middle_managers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.replicas) - .map(i32::from), - DruidRole::Coordinator => self - .spec - .coordinators - 
.role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.replicas) - .map(i32::from), - DruidRole::Historical => self - .spec - .historicals - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.replicas) - .map(i32::from), - DruidRole::Router => self - .spec - .routers - .role_groups - .get(&rolegroup_ref.role_group) - .and_then(|rg| rg.replicas) - .map(i32::from), - } - } - pub fn build_role_properties( &self, ) -> HashMap< @@ -671,62 +602,73 @@ impl DruidCluster { pub fn merged_config(&self) -> Result { Ok(MergedConfig { - brokers: DruidCluster::merged_role_config( + brokers: DruidCluster::merged_role( &self.spec.brokers, &BrokerConfig::default_config(), )?, - coordinators: DruidCluster::merged_role_config( + coordinators: DruidCluster::merged_role( &self.spec.coordinators, &CoordinatorConfig::default_config(), )?, - historicals: DruidCluster::merged_role_config( + historicals: DruidCluster::merged_role( &self.spec.historicals, &HistoricalConfig::default_config(), )?, - middle_managers: DruidCluster::merged_role_config( + middle_managers: DruidCluster::merged_role( &self.spec.middle_managers, &MiddleManagerConfig::default_config(), )?, - routers: DruidCluster::merged_role_config( + routers: DruidCluster::merged_role( &self.spec.routers, &RouterConfig::default_config(), )?, }) } - fn merged_role_config( + fn merged_role( role: &Role, default_config: &T::Fragment, - ) -> Result, Error> + ) -> Result>, Error> where T: FromFragment, T::Fragment: Clone + Merge, { let mut merged_role_config = HashMap::new(); - for ( - rolegroup_name, - RoleGroup { - config: - CommonConfiguration { - config: rolegroup_config, - .. - }, - .. 
- }, - ) in &role.role_groups - { - let merged_rolegroup_config = DruidCluster::merged_rolegroup_config( - rolegroup_config, - &role.config.config, - default_config, - )?; + for (rolegroup_name, rolegroup) in &role.role_groups { + let merged_rolegroup_config = + DruidCluster::merged_rolegroup(rolegroup, &role.config.config, default_config)?; merged_role_config.insert(rolegroup_name.to_owned(), merged_rolegroup_config); } Ok(merged_role_config) } + fn merged_rolegroup( + rolegroup: &RoleGroup, + role_config: &T::Fragment, + default_config: &T::Fragment, + ) -> Result, Error> + where + T: FromFragment, + T::Fragment: Clone + Merge, + { + Ok(RoleGroup { + config: CommonConfiguration { + config: DruidCluster::merged_rolegroup_config( + &rolegroup.config.config, + role_config, + default_config, + )?, + config_overrides: rolegroup.config.config_overrides.to_owned(), + env_overrides: rolegroup.config.env_overrides.to_owned(), + cli_overrides: rolegroup.config.cli_overrides.to_owned(), + }, + replicas: rolegroup.replicas, + selector: rolegroup.selector.to_owned(), + }) + } + pub fn merged_rolegroup_config( rolegroup_config: &T::Fragment, role_config: &T::Fragment, diff --git a/rust/crd/src/resource.rs b/rust/crd/src/resource.rs index 7dbc1d42..232fa82e 100644 --- a/rust/crd/src/resource.rs +++ b/rust/crd/src/resource.rs @@ -192,6 +192,7 @@ mod test { NoRuntimeLimitsFragment, }, k8s_openapi::apimachinery::pkg::api::resource::Quantity, + role_utils::{CommonConfiguration, RoleGroup}, }; #[rstest] @@ -343,8 +344,16 @@ mod test { ); let config = cluster.merged_config().unwrap(); - if let Some(MiddleManagerConfig { - resources: middlemanager_resources_from_rg, + if let Some(RoleGroup { + config: + CommonConfiguration { + config: + MiddleManagerConfig { + resources: middlemanager_resources_from_rg, + }, + .. + }, + .. 
}) = config.middle_managers.get("resources-from-role-group") { let expected = Resources { @@ -367,8 +376,16 @@ mod test { panic!("No role group named [resources-from-role-group] found"); } - if let Some(MiddleManagerConfig { - resources: middlemanager_resources_from_rg, + if let Some(RoleGroup { + config: + CommonConfiguration { + config: + MiddleManagerConfig { + resources: middlemanager_resources_from_rg, + }, + .. + }, + .. }) = config.middle_managers.get("resources-from-role") { let expected = Resources { @@ -402,7 +419,10 @@ mod test { // ---------- default role group let config = cluster.merged_config().unwrap(); - let res = config.resources(DruidRole::Historical, "default"); + let res = config + .common_config(DruidRole::Historical, "default") + .unwrap() + .resources; let mut got = BTreeMap::new(); assert!(res.update_druid_config_file(&mut got).is_ok()); @@ -413,7 +433,10 @@ mod test { assert_eq!(value, &expected, "primary"); // ---------- secondary role group - let res = config.resources(DruidRole::Historical, "secondary"); + let res = config + .common_config(DruidRole::Historical, "secondary") + .unwrap() + .resources; let mut got = BTreeMap::new(); assert!(res.update_druid_config_file(&mut got).is_ok()); diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 0022cb40..41958344 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -11,6 +11,7 @@ use crate::{ }; use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_druid_crd::build_recommended_labels; use stackable_druid_crd::{ authorization::DruidAuthorization, build_string_list, @@ -19,15 +20,11 @@ use stackable_druid_crd::{ PLACEHOLDER_LDAP_BIND_USER, }, security::{resolve_authentication_classes, DruidTlsSecurity}, - DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, AUTH_AUTHORIZER_OPA_URI, CERTS_DIR, - CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, DS_BUCKET, EXTENSIONS_LOADLIST, 
+ CommonConfig, DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, AUTH_AUTHORIZER_OPA_URI, + CERTS_DIR, CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, DS_BUCKET, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, ZOOKEEPER_CONNECTION_STRING, }; -use stackable_druid_crd::{ - build_recommended_labels, - resource::{self, RoleResource}, -}; use stackable_operator::{ builder::{ ConfigMapBuilder, ContainerBuilder, ObjectMetaBuilder, PodBuilder, @@ -167,6 +164,8 @@ pub enum Error { }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, + #[snafu(display("invalid configuration"))] + InvalidConfiguration { source: stackable_druid_crd::Error }, #[snafu(display("invalid java heap config - missing default or value in crd?"))] InvalidJavaHeapConfig, #[snafu(display("failed to convert java heap config to unit [{unit}]"))] @@ -313,7 +312,7 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< ) .context(CreateClusterResourcesSnafu)?; - let config = druid.merged_config().context(FailedToResolveConfigSnafu)?; + let merged_config = druid.merged_config().context(FailedToResolveConfigSnafu)?; for (role_name, role_config) in validated_role_config.iter() { let druid_role = DruidRole::from_str(role_name).context(UnidentifiedDruidRoleSnafu { @@ -342,7 +341,9 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< role_group: rolegroup_name.into(), }; - let resources = config.resources(druid_role.clone(), rolegroup_name); + let merged_rolegroup_config = merged_config + .common_config(druid_role.clone(), rolegroup_name) + .context(FailedToResolveConfigSnafu)?; let rg_service = build_rolegroup_services( &druid, @@ -355,11 +356,11 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< &resolved_product_image, &rolegroup, rolegroup_config, + 
&merged_rolegroup_config, &zk_connstr, opa_connstr.as_deref(), s3_conn.as_ref(), deep_storage_bucket_name.as_deref(), - &resources, &druid_tls_security, &druid_ldap_settings, )?; @@ -368,8 +369,8 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< &resolved_product_image, &rolegroup, rolegroup_config, + &merged_rolegroup_config, s3_conn.as_ref(), - &resources, &druid_tls_security, &druid_ldap_settings, )?; @@ -463,11 +464,11 @@ fn build_rolegroup_config_map( resolved_product_image: &ResolvedProductImage, rolegroup: &RoleGroupRef, rolegroup_config: &HashMap>, + merged_rolegroup_config: &CommonConfig, zk_connstr: &str, opa_connstr: Option<&str>, s3_conn: Option<&S3ConnectionSpec>, deep_storage_bucket_name: Option<&str>, - resources: &RoleResource, druid_tls_security: &DruidTlsSecurity, druid_ldap_settings: &Option, ) -> Result { @@ -483,7 +484,8 @@ fn build_rolegroup_config_map( // This has to be done here since there is no other suitable place for it. // Previously such properties were added in the compute_files() function, // but that code path is now incompatible with the design of fragment merging. 
- resources + merged_rolegroup_config + .resources .update_druid_config_file(&mut conf) .context(UpdateDruidConfigFromResourcesSnafu)?; // NOTE: druid.host can be set manually - if it isn't, the canonical host name of @@ -551,7 +553,8 @@ fn build_rolegroup_config_map( cm_conf_data.insert(RUNTIME_PROPS.to_string(), runtime_properties); } PropertyNameKind::File(file_name) if file_name == JVM_CONFIG => { - let (heap, direct) = resources + let (heap, direct) = merged_rolegroup_config + .resources .get_memory_sizes(&role) .context(DeriveMemorySettingsSnafu)?; let jvm_config = get_jvm_config(&role, heap, direct).context(GetJvmConfigSnafu)?; @@ -646,8 +649,8 @@ fn build_rolegroup_statefulset( resolved_product_image: &ResolvedProductImage, rolegroup_ref: &RoleGroupRef, rolegroup_config: &HashMap>, + merged_rolegroup_config: &CommonConfig, s3_conn: Option<&S3ConnectionSpec>, - resources: &RoleResource, druid_tls_security: &DruidTlsSecurity, ldap_settings: &Option, ) -> Result { @@ -663,7 +666,7 @@ fn build_rolegroup_statefulset( .context(FailedContainerBuilderCreationSnafu { name: APP_NAME })?; // init pod builder let mut pb = PodBuilder::new(); - pb.node_selector_opt(druid.node_selector(rolegroup_ref)); + pb.node_selector_opt(merged_rolegroup_config.selector.to_owned()); if let Some(ldap_settings) = ldap_settings { // TODO: Connecting to an LDAP server without bind credentials does not seem to be configurable in Druid at the moment @@ -690,7 +693,9 @@ fn build_rolegroup_statefulset( &mut cb_druid, &mut pb, ); - resources.update_volumes_and_volume_mounts(&mut cb_druid, &mut pb); + merged_rolegroup_config + .resources + .update_volumes_and_volume_mounts(&mut cb_druid, &mut pb); let prepare_container_command = druid_tls_security.build_tls_key_stores_cmd(); @@ -727,7 +732,7 @@ fn build_rolegroup_statefulset( .readiness_probe(druid_tls_security.get_tcp_socket_probe(10, 10, 1, 3)) // 10s * 3 = 30s to be restarted .liveness_probe(druid_tls_security.get_tcp_socket_probe(10, 10, 3, 
3)) - .resources(resources.as_resource_requirements()); + .resources(merged_rolegroup_config.resources.as_resource_requirements()); pb.image_pull_secrets_from_product_image(resolved_product_image) .add_init_container(cb_prepare.build()) @@ -768,7 +773,7 @@ fn build_rolegroup_statefulset( replicas: if druid.spec.stopped.unwrap_or(false) { Some(0) } else { - druid.replicas(rolegroup_ref) + merged_rolegroup_config.replicas.map(i32::from) }, selector: LabelSelector { match_labels: Some(role_group_selector_labels( @@ -932,6 +937,8 @@ mod test { }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, + #[snafu(display("invalid configuration"))] + InvalidConfiguration { source: stackable_druid_crd::Error }, } #[rstest] @@ -994,7 +1001,9 @@ mod test { role_group: rolegroup_name.clone(), }; - let resources = config.resources(DruidRole::Historical, rolegroup_name); + let merged_rolegroup_config = config + .common_config(DruidRole::Historical, rolegroup_name) + .context(InvalidConfigurationSnafu)?; let ldap_settings: Option = None; @@ -1003,11 +1012,11 @@ mod test { &resolved_product_image, &rolegroup_ref, rolegroup_config, + &merged_rolegroup_config, "zookeeper-connection-string", None, None, None, - &resources, &druid_tls_security, &ldap_settings, ) From cb1cd8c1e1f77275f563d571ee6860d589dff18e Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 13:06:24 +0100 Subject: [PATCH 03/27] Remove unused error variants --- rust/crd/src/lib.rs | 2 -- rust/operator-binary/src/druid_controller.rs | 15 --------------- 2 files changed, 17 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index e6f946cb..fe656ed8 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -125,8 +125,6 @@ pub enum Error { }, #[snafu(display("2 differing s3 connections were given, this is unsupported by Druid"))] IncompatibleS3Connections, - #[snafu(display("Unknown Druid role 
found {role}. Should be one of {roles:?}"))] - UnknownDruidRole { role: String, roles: Vec }, #[snafu(display("the role group {rolegroup_name} is not defined"))] CannotRetrieveRoleGroup { rolegroup_name: String }, #[snafu(display("missing namespace for resource {name}"))] diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 41958344..ab4b8f0f 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -158,21 +158,10 @@ pub enum Error { source: strum::ParseError, role: String, }, - #[snafu(display("failed to resolve and merge resource config for role and role group"))] - FailedToResolveResourceConfig { - source: stackable_druid_crd::resource::Error, - }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, #[snafu(display("invalid configuration"))] InvalidConfiguration { source: stackable_druid_crd::Error }, - #[snafu(display("invalid java heap config - missing default or value in crd?"))] - InvalidJavaHeapConfig, - #[snafu(display("failed to convert java heap config to unit [{unit}]"))] - FailedToConvertJavaHeap { - source: stackable_operator::error::Error, - unit: String, - }, #[snafu(display("failed to create cluster resources"))] CreateClusterResources { source: stackable_operator::error::Error, @@ -931,10 +920,6 @@ mod test { OperatorFramework { source: stackable_operator::error::Error, }, - #[snafu(display("resource error"))] - Resource { - source: stackable_druid_crd::resource::Error, - }, #[snafu(display("failed to resolve and merge config for role and role group"))] FailedToResolveConfig { source: stackable_druid_crd::Error }, #[snafu(display("invalid configuration"))] From f1fae47a3625f7b54042756a13ea59b617d91366 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 13:52:42 +0100 Subject: [PATCH 04/27] Update changelog --- CHANGELOG.md | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36946e54..8d8c71a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ All notable changes to this project will be documented in this file. ### Changed -- Upgrade to `operator-rs` `0.35.0` ([#374], [#380], [#404]) +- Upgrade to `operator-rs` `0.31.0` ([#374]) - Merging and validation of the configuration refactored ([#404]). ### Fixed From ba11a8f74f43ff830de284d213f351504d81d3c7 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 14:12:44 +0100 Subject: [PATCH 05/27] Upgrade operator-rs to version 0.35.0 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d8c71a5..36946e54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ All notable changes to this project will be documented in this file. ### Changed -- Upgrade to `operator-rs` `0.31.0` ([#374]) +- Upgrade to `operator-rs` `0.35.0` ([#374], [#380], [#404]) - Merging and validation of the configuration refactored ([#404]). 
### Fixed From b4dd7fe3106a08414bdea61464eef851b1f351df Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 22 Feb 2023 15:02:33 +0100 Subject: [PATCH 06/27] Add code comments --- rust/crd/src/lib.rs | 51 +++++++++++++------- rust/operator-binary/src/druid_controller.rs | 13 ++--- 2 files changed, 41 insertions(+), 23 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index fe656ed8..946d4ecc 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -194,27 +194,45 @@ pub struct DruidClusterConfig { pub zookeeper_config_map_name: String, } +/// Common configuration for all role groups +pub struct CommonRoleGroupConfig { + pub resources: RoleResource, + pub replicas: Option, + pub selector: Option, +} + +/// Container for the merged and validated role group configurations +/// +/// This structure contains for every role a map from the role group names to their configurations. +/// The role group configurations are merged with the role and default configurations. The product +/// configuration is not applied. 
pub struct MergedConfig { + /// Merged configuration of the broker role pub brokers: HashMap>, + /// Merged configuration of the coordinator role pub coordinators: HashMap>, + /// Merged configuration of the historical role pub historicals: HashMap>, + /// Merged configuration of the middle manager role pub middle_managers: HashMap>, + /// Merged configuration of the router role pub routers: HashMap>, } impl MergedConfig { + /// Returns the common configuration for the given role and rolegroup name pub fn common_config( &self, role: DruidRole, rolegroup_name: &str, - ) -> Result { + ) -> Result { match role { DruidRole::Broker => { let rolegroup = self .brokers .get(rolegroup_name) .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonConfig { + Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), @@ -225,7 +243,7 @@ impl MergedConfig { .coordinators .get(rolegroup_name) .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonConfig { + Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), @@ -236,7 +254,7 @@ impl MergedConfig { .historicals .get(rolegroup_name) .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonConfig { + Ok(CommonRoleGroupConfig { resources: RoleResource::Historical( rolegroup.config.config.resources.to_owned(), ), @@ -249,7 +267,7 @@ impl MergedConfig { .middle_managers .get(rolegroup_name) .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; - Ok(CommonConfig { + Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), @@ -260,7 +278,7 @@ impl MergedConfig { .routers .get(rolegroup_name) .context(CannotRetrieveRoleGroupSnafu { 
rolegroup_name })?; - Ok(CommonConfig { + Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), @@ -270,12 +288,6 @@ impl MergedConfig { } } -pub struct CommonConfig { - pub resources: RoleResource, - pub replicas: Option, - pub selector: Option, -} - #[derive( Clone, Debug, @@ -598,6 +610,7 @@ impl DruidCluster { s3_ingestion || s3_storage } + /// Returns the merged and validated configuration for all roles pub fn merged_config(&self) -> Result { Ok(MergedConfig { brokers: DruidCluster::merged_role( @@ -623,6 +636,7 @@ impl DruidCluster { }) } + /// Merges and validates the role groups of the given role with the given default configuration fn merged_role( role: &Role, default_config: &T::Fragment, @@ -642,6 +656,7 @@ impl DruidCluster { Ok(merged_role_config) } + /// Merges and validates the given role group with the given role and default configurations fn merged_rolegroup( rolegroup: &RoleGroup, role_config: &T::Fragment, @@ -651,13 +666,14 @@ impl DruidCluster { T: FromFragment, T::Fragment: Clone + Merge, { + let merged_config = DruidCluster::merged_rolegroup_config( + &rolegroup.config.config, + role_config, + default_config, + )?; Ok(RoleGroup { config: CommonConfiguration { - config: DruidCluster::merged_rolegroup_config( - &rolegroup.config.config, - role_config, - default_config, - )?, + config: merged_config, config_overrides: rolegroup.config.config_overrides.to_owned(), env_overrides: rolegroup.config.env_overrides.to_owned(), cli_overrides: rolegroup.config.cli_overrides.to_owned(), @@ -667,6 +683,7 @@ impl DruidCluster { }) } + /// Merges and validates the given role group, role, and default configurations pub fn merged_rolegroup_config( rolegroup_config: &T::Fragment, role_config: &T::Fragment, diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index ab4b8f0f..af3fbe34 100644 
--- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -20,10 +20,11 @@ use stackable_druid_crd::{ PLACEHOLDER_LDAP_BIND_USER, }, security::{resolve_authentication_classes, DruidTlsSecurity}, - CommonConfig, DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, AUTH_AUTHORIZER_OPA_URI, - CERTS_DIR, CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, DS_BUCKET, EXTENSIONS_LOADLIST, - HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, - S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, ZOOKEEPER_CONNECTION_STRING, + CommonRoleGroupConfig, DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, + AUTH_AUTHORIZER_OPA_URI, CERTS_DIR, CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, + DS_BUCKET, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, + RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, + ZOOKEEPER_CONNECTION_STRING, }; use stackable_operator::{ builder::{ @@ -453,7 +454,7 @@ fn build_rolegroup_config_map( resolved_product_image: &ResolvedProductImage, rolegroup: &RoleGroupRef, rolegroup_config: &HashMap>, - merged_rolegroup_config: &CommonConfig, + merged_rolegroup_config: &CommonRoleGroupConfig, zk_connstr: &str, opa_connstr: Option<&str>, s3_conn: Option<&S3ConnectionSpec>, @@ -638,7 +639,7 @@ fn build_rolegroup_statefulset( resolved_product_image: &ResolvedProductImage, rolegroup_ref: &RoleGroupRef, rolegroup_config: &HashMap>, - merged_rolegroup_config: &CommonConfig, + merged_rolegroup_config: &CommonRoleGroupConfig, s3_conn: Option<&S3ConnectionSpec>, druid_tls_security: &DruidTlsSecurity, ldap_settings: &Option, From ccc5f6d6c6010969dc995c83e8ce2b19e86bf3fc Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Thu, 23 Feb 2023 15:20:20 +0100 Subject: [PATCH 07/27] Enable log aggregation --- rust/crd/src/lib.rs | 68 +++++++- rust/crd/src/resource.rs | 2 + rust/operator-binary/src/config.rs | 42 +---- 
rust/operator-binary/src/druid_controller.rs | 151 ++++++++++++++++-- rust/operator-binary/src/main.rs | 1 + rust/operator-binary/src/product_logging.rs | 125 +++++++++++++++ .../templates/kuttl/resources/20-assert.yaml | 2 + tests/templates/kuttl/smoke/03-assert.yaml | 2 + tests/test-definition.yaml | 6 +- 9 files changed, 339 insertions(+), 60 deletions(-) create mode 100644 rust/operator-binary/src/product_logging.rs diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 946d4ecc..ce421b99 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -31,6 +31,7 @@ use stackable_operator::{ labels::ObjectLabels, product_config::types::PropertyNameKind, product_config_utils::{ConfigError, Configuration}, + product_logging::{self, spec::Logging}, role_utils::{CommonConfiguration, Role, RoleGroup}, schemars::{self, JsonSchema}, }; @@ -44,12 +45,13 @@ pub const OPERATOR_NAME: &str = "druid.stackable.tech"; // config directories pub const DRUID_CONFIG_DIRECTORY: &str = "/stackable/config"; pub const HDFS_CONFIG_DIRECTORY: &str = "/stackable/hdfs"; +pub const LOG_CONFIG_DIRECTORY: &str = "/stackable/log_config"; pub const RW_CONFIG_DIRECTORY: &str = "/stackable/rwconfig"; // config file names pub const JVM_CONFIG: &str = "jvm.config"; pub const RUNTIME_PROPS: &str = "runtime.properties"; -pub const LOG4J2_CONFIG: &str = "log4j2.xml"; +pub const LOG4J2_CONFIG: &str = "log4j2.properties"; // store directories pub const SYSTEM_TRUST_STORE: &str = "/etc/pki/java/cacerts"; @@ -57,6 +59,10 @@ pub const SYSTEM_TRUST_STORE_PASSWORD: &str = "changeit"; pub const STACKABLE_TRUST_STORE: &str = "/stackable/truststore.p12"; pub const STACKABLE_TRUST_STORE_PASSWORD: &str = "changeit"; pub const CERTS_DIR: &str = "/stackable/certificates"; +pub const LOG_DIR: &str = "/stackable/log"; + +// store file names +pub const DRUID_LOG_FILE: &str = "druid.log4j2.xml"; pub const PROP_SEGMENT_CACHE_LOCATIONS: &str = "druid.segmentCache.locations"; pub const PATH_SEGMENT_CACHE: &str = 
"/stackable/var/druid/segment-cache"; @@ -97,6 +103,11 @@ pub const PROCESSING_NUM_MERGE_BUFFERS: &str = "druid.processing.numMergeBuffers pub const PROCESSING_NUM_THREADS: &str = "druid.processing.numThreads"; // extra pub const CREDENTIALS_SECRET_PROPERTY: &str = "credentialsSecret"; +// logs +pub const MAX_DRUID_LOG_FILES_SIZE_IN_MIB: u32 = 10; +const MAX_PREPARE_LOG_FILE_SIZE_IN_MIB: u32 = 1; +pub const LOG_VOLUME_SIZE_IN_MIB: u32 = + MAX_DRUID_LOG_FILES_SIZE_IN_MIB + MAX_PREPARE_LOG_FILE_SIZE_IN_MIB; // metrics pub const PROMETHEUS_PORT: &str = "druid.emitter.prometheus.port"; pub const METRICS_PORT: u16 = 9090; @@ -169,6 +180,27 @@ pub struct DruidClusterSpec { pub cluster_config: DruidClusterConfig, } +#[derive( + Clone, + Debug, + Deserialize, + Display, + Eq, + EnumIter, + JsonSchema, + Ord, + PartialEq, + PartialOrd, + Serialize, +)] +#[serde(rename_all = "kebab-case")] +#[strum(serialize_all = "kebab-case")] +pub enum Container { + Prepare, + Vector, + Druid, +} + #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] #[serde(rename_all = "camelCase")] pub struct DruidClusterConfig { @@ -192,11 +224,16 @@ pub struct DruidClusterConfig { pub tls: Option, /// ZooKeeper discovery ConfigMap pub zookeeper_config_map_name: String, + /// Name of the Vector aggregator discovery ConfigMap. + /// It must contain the key `ADDRESS` with the address of the Vector aggregator. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub vector_aggregator_config_map_name: Option, } /// Common configuration for all role groups pub struct CommonRoleGroupConfig { pub resources: RoleResource, + pub logging: Logging, pub replicas: Option, pub selector: Option, } @@ -234,6 +271,7 @@ impl MergedConfig { .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + logging: rolegroup.config.config.logging.to_owned(), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), }) @@ -245,6 +283,7 @@ impl MergedConfig { .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + logging: rolegroup.config.config.logging.to_owned(), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), }) @@ -258,6 +297,7 @@ impl MergedConfig { resources: RoleResource::Historical( rolegroup.config.config.resources.to_owned(), ), + logging: rolegroup.config.config.logging.to_owned(), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), }) @@ -269,6 +309,7 @@ impl MergedConfig { .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + logging: rolegroup.config.config.logging.to_owned(), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), }) @@ -280,6 +321,7 @@ impl MergedConfig { .context(CannotRetrieveRoleGroupSnafu { rolegroup_name })?; Ok(CommonRoleGroupConfig { resources: RoleResource::Druid(rolegroup.config.config.resources.to_owned()), + logging: rolegroup.config.config.logging.to_owned(), replicas: rolegroup.replicas, selector: rolegroup.selector.to_owned(), }) @@ -381,6 +423,13 @@ impl DruidRole { rw_conf = RW_CONFIG_DIRECTORY )); + // copy log config to rw config + 
shell_cmd.push(format!( + "cp -RL {conf}/* {rw_conf}", + conf = LOG_CONFIG_DIRECTORY, + rw_conf = RW_CONFIG_DIRECTORY + )); + // copy hdfs config to RW_CONFIG_DIRECTORY folder (if available) shell_cmd.push(format!( "cp -RL {hdfs_conf}/* {rw_conf} 2>/dev/null || :", // NOTE: the OR part is here because the command is not applicable sometimes, and would stop everything else from executing @@ -462,7 +511,6 @@ impl DruidCluster { // metrics result.insert(PROMETHEUS_PORT.to_string(), Some(METRICS_PORT.to_string())); } - LOG4J2_CONFIG => {} _ => {} } @@ -481,7 +529,6 @@ impl DruidCluster { let config_files = vec![ PropertyNameKind::Env, PropertyNameKind::File(JVM_CONFIG.to_string()), - PropertyNameKind::File(LOG4J2_CONFIG.to_string()), PropertyNameKind::File(RUNTIME_PROPS.to_string()), ]; @@ -791,12 +838,15 @@ pub struct IngestionSpec { pub struct BrokerConfig { #[fragment_attrs(serde(default))] resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, } impl BrokerConfig { fn default_config() -> BrokerConfigFragment { BrokerConfigFragment { resources: resource::DEFAULT_RESOURCES.to_owned(), + logging: product_logging::spec::default_logging(), } } } @@ -818,12 +868,15 @@ impl BrokerConfig { pub struct CoordinatorConfig { #[fragment_attrs(serde(default))] resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, } impl CoordinatorConfig { fn default_config() -> CoordinatorConfigFragment { CoordinatorConfigFragment { resources: resource::DEFAULT_RESOURCES.to_owned(), + logging: product_logging::spec::default_logging(), } } } @@ -845,12 +898,15 @@ impl CoordinatorConfig { pub struct MiddleManagerConfig { #[fragment_attrs(serde(default))] resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, } impl MiddleManagerConfig { fn default_config() -> MiddleManagerConfigFragment { MiddleManagerConfigFragment { resources: resource::DEFAULT_RESOURCES.to_owned(), + logging: 
product_logging::spec::default_logging(), } } } @@ -872,12 +928,15 @@ impl MiddleManagerConfig { pub struct RouterConfig { #[fragment_attrs(serde(default))] resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, } impl RouterConfig { fn default_config() -> RouterConfigFragment { RouterConfigFragment { resources: resource::DEFAULT_RESOURCES.to_owned(), + logging: product_logging::spec::default_logging(), } } } @@ -899,12 +958,15 @@ impl RouterConfig { pub struct HistoricalConfig { #[fragment_attrs(serde(default))] resources: Resources, + #[fragment_attrs(serde(default))] + pub logging: Logging, } impl HistoricalConfig { fn default_config() -> HistoricalConfigFragment { HistoricalConfigFragment { resources: resource::HISTORICAL_RESOURCES.to_owned(), + logging: product_logging::spec::default_logging(), } } } diff --git a/rust/crd/src/resource.rs b/rust/crd/src/resource.rs index 232fa82e..b6602236 100644 --- a/rust/crd/src/resource.rs +++ b/rust/crd/src/resource.rs @@ -350,6 +350,7 @@ mod test { config: MiddleManagerConfig { resources: middlemanager_resources_from_rg, + .. }, .. }, @@ -382,6 +383,7 @@ mod test { config: MiddleManagerConfig { resources: middlemanager_resources_from_rg, + .. }, .. 
}, diff --git a/rust/operator-binary/src/config.rs b/rust/operator-binary/src/config.rs index e9b75498..c0463f94 100644 --- a/rust/operator-binary/src/config.rs +++ b/rust/operator-binary/src/config.rs @@ -1,6 +1,9 @@ use indoc::formatdoc; use snafu::{ResultExt, Snafu}; -use stackable_druid_crd::{DruidRole, STACKABLE_TRUST_STORE, STACKABLE_TRUST_STORE_PASSWORD}; +use stackable_druid_crd::{ + DruidRole, LOG4J2_CONFIG, RW_CONFIG_DIRECTORY, STACKABLE_TRUST_STORE, + STACKABLE_TRUST_STORE_PASSWORD, +}; use stackable_operator::memory::MemoryQuantity; #[derive(Snafu, Debug)] @@ -36,6 +39,7 @@ pub fn get_jvm_config( -Dfile.encoding=UTF-8 -Djava.io.tmpdir=/tmp -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager + -Dlog4j.configurationFile={RW_CONFIG_DIRECTORY}/{LOG4J2_CONFIG} -XX:+UseG1GC -XX:+ExitOnOutOfMemoryError -Djavax.net.ssl.trustStore={STACKABLE_TRUST_STORE} @@ -53,39 +57,3 @@ pub fn get_jvm_config( } Ok(config) } - -pub fn get_log4j_config(_role: &DruidRole) -> String { - " - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" - .to_string() -} diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index af3fbe34..58d0a4d3 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -1,17 +1,17 @@ //! 
Ensures that `Pod`s are configured and running for each [`DruidCluster`] use crate::{ - config::{get_jvm_config, get_log4j_config}, + config::get_jvm_config, discovery::{self, build_discovery_configmaps}, extensions::get_extension_list, internal_secret::{ build_shared_internal_secret_name, create_shared_internal_secret, env_var_from_secret, ENV_INTERNAL_SECRET, }, + product_logging::{extend_role_group_config_map, resolve_vector_aggregator_address}, OPERATOR_NAME, }; use snafu::{OptionExt, ResultExt, Snafu}; -use stackable_druid_crd::build_recommended_labels; use stackable_druid_crd::{ authorization::DruidAuthorization, build_string_list, @@ -22,10 +22,11 @@ use stackable_druid_crd::{ security::{resolve_authentication_classes, DruidTlsSecurity}, CommonRoleGroupConfig, DeepStorageSpec, DruidCluster, DruidRole, APP_NAME, AUTH_AUTHORIZER_OPA_URI, CERTS_DIR, CREDENTIALS_SECRET_PROPERTY, DRUID_CONFIG_DIRECTORY, - DS_BUCKET, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG4J2_CONFIG, - RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ENDPOINT_URL, S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, - ZOOKEEPER_CONNECTION_STRING, + DS_BUCKET, EXTENSIONS_LOADLIST, HDFS_CONFIG_DIRECTORY, JVM_CONFIG, LOG_CONFIG_DIRECTORY, + LOG_DIR, LOG_VOLUME_SIZE_IN_MIB, RUNTIME_PROPS, RW_CONFIG_DIRECTORY, S3_ENDPOINT_URL, + S3_PATH_STYLE_ACCESS, S3_SECRET_DIR_NAME, ZOOKEEPER_CONNECTION_STRING, }; +use stackable_druid_crd::{build_recommended_labels, Container}; use stackable_operator::{ builder::{ ConfigMapBuilder, ContainerBuilder, ObjectMetaBuilder, PodBuilder, @@ -43,7 +44,7 @@ use stackable_operator::{ apps::v1::{StatefulSet, StatefulSetSpec}, core::v1::{ConfigMap, EnvVar, Service, ServiceSpec}, }, - apimachinery::pkg::apis::meta::v1::LabelSelector, + apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::LabelSelector}, }, kube::{ runtime::{controller::Action, reflector::ObjectRef}, @@ -53,6 +54,13 @@ use stackable_operator::{ logging::controller::ReconcilerError, 
product_config::{types::PropertyNameKind, ProductConfigManager}, product_config_utils::{transform_all_roles_to_config, validate_all_roles_and_groups_config}, + product_logging::{ + self, + spec::{ + ConfigMapLogConfig, ContainerLogConfig, ContainerLogConfigChoice, + CustomContainerLogConfig, + }, + }, role_utils::RoleGroupRef, }; use std::{ @@ -200,6 +208,15 @@ pub enum Error { "failed to access bind credentials although they are required for LDAP to work" ))] LdapBindCredentialsAreRequired, + #[snafu(display("failed to resolve the Vector aggregator address"))] + ResolveVectorAggregatorAddress { + source: crate::product_logging::Error, + }, + #[snafu(display("failed to add the logging configuration to the ConfigMap [{cm_name}]"))] + InvalidLoggingConfig { + source: crate::product_logging::Error, + cm_name: String, + }, } type Result = std::result::Result; @@ -234,6 +251,10 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< cm_name: zk_confmap.clone(), })?; + let vector_aggregator_address = resolve_vector_aggregator_address(&druid, client) + .await + .context(ResolveVectorAggregatorAddressSnafu)?; + // Assemble the OPA connection string from the discovery and the given path, if a spec is given. 
let opa_connstr = if let Some(DruidAuthorization { opa: opa_config }) = &druid.spec.cluster_config.authorization @@ -348,6 +369,7 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< rolegroup_config, &merged_rolegroup_config, &zk_connstr, + vector_aggregator_address.as_deref(), opa_connstr.as_deref(), s3_conn.as_ref(), deep_storage_bucket_name.as_deref(), @@ -456,6 +478,7 @@ fn build_rolegroup_config_map( rolegroup_config: &HashMap>, merged_rolegroup_config: &CommonRoleGroupConfig, zk_connstr: &str, + vector_aggregator_address: Option<&str>, opa_connstr: Option<&str>, s3_conn: Option<&S3ConnectionSpec>, deep_storage_bucket_name: Option<&str>, @@ -552,12 +575,6 @@ fn build_rolegroup_config_map( // if this is changed in the future, make sure to respect overrides! cm_conf_data.insert(JVM_CONFIG.to_string(), jvm_config); } - PropertyNameKind::File(file_name) if file_name == LOG4J2_CONFIG => { - let log_config = get_log4j_config(&role); - // the user can set overrides in the config, but currently they have no effect - // if this is changed in the future, make sure to respect overrides! 
- cm_conf_data.insert(LOG4J2_CONFIG.to_string(), log_config); - } _ => {} } } @@ -581,6 +598,17 @@ fn build_rolegroup_config_map( for (filename, file_content) in cm_conf_data.iter() { config_map_builder.add_data(filename, file_content); } + + extend_role_group_config_map( + rolegroup, + vector_aggregator_address, + &merged_rolegroup_config.logging, + &mut config_map_builder, + ) + .context(InvalidLoggingConfigSnafu { + cm_name: rolegroup.object_name(), + })?; + config_map_builder .build() .with_context(|_| BuildRoleGroupConfigSnafu { @@ -649,11 +677,17 @@ fn build_rolegroup_statefulset( })?; // init container builder - let mut cb_prepare = ContainerBuilder::new("prepare") - .context(FailedContainerBuilderCreationSnafu { name: "prepare" })?; + let mut cb_prepare = ContainerBuilder::new(&Container::Prepare.to_string()).context( + FailedContainerBuilderCreationSnafu { + name: Container::Prepare.to_string(), + }, + )?; // druid container builder - let mut cb_druid = ContainerBuilder::new(APP_NAME) - .context(FailedContainerBuilderCreationSnafu { name: APP_NAME })?; + let mut cb_druid = ContainerBuilder::new(&Container::Druid.to_string()).context( + FailedContainerBuilderCreationSnafu { + name: Container::Druid.to_string(), + }, + )?; // init pod builder let mut pb = PodBuilder::new(); pb.node_selector_opt(merged_rolegroup_config.selector.to_owned()); @@ -678,6 +712,13 @@ fn build_rolegroup_statefulset( .context(FailedToInitializeSecurityContextSnafu)?; add_s3_volume_and_volume_mounts(s3_conn, &mut cb_druid, &mut pb)?; add_config_volume_and_volume_mounts(rolegroup_ref, &mut cb_druid, &mut pb); + add_log_config_volume_and_volume_mounts( + rolegroup_ref, + merged_rolegroup_config, + &mut cb_druid, + &mut pb, + ); + add_log_volume_and_volume_mounts(&mut cb_druid, &mut cb_prepare, &mut pb); add_hdfs_cm_volume_and_volume_mounts( &druid.spec.cluster_config.deep_storage, &mut cb_druid, @@ -687,7 +728,23 @@ fn build_rolegroup_statefulset( .resources 
.update_volumes_and_volume_mounts(&mut cb_druid, &mut pb); - let prepare_container_command = druid_tls_security.build_tls_key_stores_cmd(); + let mut prepare_container_command = vec![]; + + if let Some(ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic(log_config)), + }) = merged_rolegroup_config + .logging + .containers + .get(&Container::Prepare) + { + prepare_container_command.push(product_logging::framework::capture_shell_output( + LOG_DIR, + &Container::Prepare.to_string(), + log_config, + )); + } + + prepare_container_command.extend(druid_tls_security.build_tls_key_stores_cmd()); cb_prepare .image_from_product_image(resolved_product_image) @@ -744,6 +801,18 @@ fn build_rolegroup_statefulset( .build(), ); + if merged_rolegroup_config.logging.enable_vector_agent { + pb.add_container(product_logging::framework::vector_container( + resolved_product_image, + "config", + "log", + merged_rolegroup_config + .logging + .containers + .get(&Container::Vector), + )); + } + Ok(StatefulSet { metadata: ObjectMetaBuilder::new() .name_and_namespace(druid) @@ -850,6 +919,53 @@ fn add_config_volume_and_volume_mounts( ); } +fn add_log_config_volume_and_volume_mounts( + rolegroup_ref: &RoleGroupRef, + merged_rolegroup_config: &CommonRoleGroupConfig, + cb_druid: &mut ContainerBuilder, + pb: &mut PodBuilder, +) { + cb_druid.add_volume_mount("log-config", LOG_CONFIG_DIRECTORY); + + let config_map = if let Some(ContainerLogConfig { + choice: + Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { + custom: ConfigMapLogConfig { config_map }, + })), + }) = merged_rolegroup_config + .logging + .containers + .get(&Container::Druid) + { + config_map.into() + } else { + rolegroup_ref.object_name() + }; + + pb.add_volume( + VolumeBuilder::new("log-config") + .with_config_map(config_map) + .build(), + ); +} + +fn add_log_volume_and_volume_mounts( + cb_druid: &mut ContainerBuilder, + cb_prepare: &mut ContainerBuilder, + pb: &mut PodBuilder, +) { + 
cb_druid.add_volume_mount("log", LOG_DIR); + cb_prepare.add_volume_mount("log", LOG_DIR); + pb.add_volume( + VolumeBuilder::new("log") + .with_empty_dir( + Some(""), + Some(Quantity(format!("{LOG_VOLUME_SIZE_IN_MIB}Mi"))), + ) + .build(), + ); +} + fn add_s3_volume_and_volume_mounts( s3_conn: Option<&S3ConnectionSpec>, cb_druid: &mut ContainerBuilder, @@ -1003,6 +1119,7 @@ mod test { None, None, None, + None, &druid_tls_security, &ldap_settings, ) diff --git a/rust/operator-binary/src/main.rs b/rust/operator-binary/src/main.rs index 85f8651a..5f752d4f 100644 --- a/rust/operator-binary/src/main.rs +++ b/rust/operator-binary/src/main.rs @@ -3,6 +3,7 @@ mod discovery; mod druid_controller; mod extensions; mod internal_secret; +mod product_logging; use std::sync::Arc; diff --git a/rust/operator-binary/src/product_logging.rs b/rust/operator-binary/src/product_logging.rs new file mode 100644 index 00000000..14fd34b9 --- /dev/null +++ b/rust/operator-binary/src/product_logging.rs @@ -0,0 +1,125 @@ +use snafu::{OptionExt, ResultExt, Snafu}; +use stackable_druid_crd::{ + Container, DruidCluster, DRUID_LOG_FILE, LOG4J2_CONFIG, LOG_DIR, + MAX_DRUID_LOG_FILES_SIZE_IN_MIB, +}; +use stackable_operator::{ + builder::ConfigMapBuilder, + client::Client, + k8s_openapi::api::core::v1::ConfigMap, + kube::ResourceExt, + product_logging::{ + self, + spec::{ContainerLogConfig, ContainerLogConfigChoice, Logging}, + }, + role_utils::RoleGroupRef, +}; + +#[derive(Snafu, Debug)] +pub enum Error { + #[snafu(display("object has no namespace"))] + ObjectHasNoNamespace, + #[snafu(display("failed to retrieve the ConfigMap {cm_name}"))] + ConfigMapNotFound { + source: stackable_operator::error::Error, + cm_name: String, + }, + #[snafu(display("failed to retrieve the entry {entry} for ConfigMap {cm_name}"))] + MissingConfigMapEntry { + entry: &'static str, + cm_name: String, + }, + #[snafu(display("crd validation failure"))] + CrdValidationFailure { source: stackable_druid_crd::Error }, + 
#[snafu(display("vectorAggregatorConfigMapName must be set"))] + MissingVectorAggregatorAddress, +} + +type Result = std::result::Result; + +const VECTOR_AGGREGATOR_CM_ENTRY: &str = "ADDRESS"; +const CONSOLE_CONVERSION_PATTERN: &str = "%d{ISO8601} %p [%t] %c - %m%n"; + +/// Return the address of the Vector aggregator if the corresponding ConfigMap name is given in the +/// cluster spec +pub async fn resolve_vector_aggregator_address( + druid: &DruidCluster, + client: &Client, +) -> Result> { + let vector_aggregator_address = if let Some(vector_aggregator_config_map_name) = &druid + .spec + .cluster_config + .vector_aggregator_config_map_name + .as_ref() + { + let vector_aggregator_address = client + .get::( + vector_aggregator_config_map_name, + druid + .namespace() + .as_deref() + .context(ObjectHasNoNamespaceSnafu)?, + ) + .await + .context(ConfigMapNotFoundSnafu { + cm_name: vector_aggregator_config_map_name.to_string(), + })? + .data + .and_then(|mut data| data.remove(VECTOR_AGGREGATOR_CM_ENTRY)) + .context(MissingConfigMapEntrySnafu { + entry: VECTOR_AGGREGATOR_CM_ENTRY, + cm_name: vector_aggregator_config_map_name.to_string(), + })?; + Some(vector_aggregator_address) + } else { + None + }; + + Ok(vector_aggregator_address) +} + +/// Extend the role group ConfigMap with logging and Vector configurations +pub fn extend_role_group_config_map( + rolegroup: &RoleGroupRef, + vector_aggregator_address: Option<&str>, + logging: &Logging, + cm_builder: &mut ConfigMapBuilder, +) -> Result<()> { + if let Some(ContainerLogConfig { + choice: Some(ContainerLogConfigChoice::Automatic(log_config)), + }) = logging.containers.get(&Container::Druid) + { + cm_builder.add_data( + LOG4J2_CONFIG, + product_logging::framework::create_log4j2_config( + &format!("{LOG_DIR}/{container}", container = Container::Druid), + DRUID_LOG_FILE, + MAX_DRUID_LOG_FILES_SIZE_IN_MIB, + CONSOLE_CONVERSION_PATTERN, + log_config, + ), + ); + } + + let vector_log_config = if let Some(ContainerLogConfig { 
+ choice: Some(ContainerLogConfigChoice::Automatic(log_config)), + }) = logging.containers.get(&Container::Vector) + { + Some(log_config) + } else { + None + }; + + if logging.enable_vector_agent { + cm_builder.add_data( + product_logging::framework::VECTOR_CONFIG_FILE, + product_logging::framework::create_vector_config( + rolegroup, + vector_aggregator_address.context(MissingVectorAggregatorAddressSnafu)?, + vector_log_config, + ), + ); + } + + Ok(()) +} diff --git a/tests/templates/kuttl/resources/20-assert.yaml b/tests/templates/kuttl/resources/20-assert.yaml index a0ecb6c8..fdef666e 100644 --- a/tests/templates/kuttl/resources/20-assert.yaml +++ b/tests/templates/kuttl/resources/20-assert.yaml @@ -77,6 +77,8 @@ spec: name: druid-resources-historical-default - name: rwconfig emptyDir: {} + - name: log-config + - name: log - name: segment-cache emptyDir: sizeLimit: 2G diff --git a/tests/templates/kuttl/smoke/03-assert.yaml b/tests/templates/kuttl/smoke/03-assert.yaml index a37d231d..91a06143 100644 --- a/tests/templates/kuttl/smoke/03-assert.yaml +++ b/tests/templates/kuttl/smoke/03-assert.yaml @@ -47,6 +47,8 @@ spec: name: druid-historical-default - name: rwconfig emptyDir: {} + - name: log-config + - name: log - name: hdfs configMap: name: druid-hdfs diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index 3032d9b8..ec5ee7c5 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -2,11 +2,11 @@ dimensions: - name: druid values: - - 0.23.0-stackable0.3.0 - - 24.0.0-stackable0.3.0 + - 0.23.0-stackable23.4.0-rc2 + - 24.0.0-stackable23.4.0-rc2 - name: druid-latest values: - - 24.0.0-stackable0.3.0 + - 24.0.0-stackable23.4.0-rc2 - name: zookeeper values: - 3.7.0-stackable0.9.0 From 7eded34cdc01e5729597290c63856f70a841439c Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 14:53:17 +0100 Subject: [PATCH 08/27] Add logging test --- .../templates/kuttl/logging/00-assert.yaml.j2 | 10 + 
...tor-aggregator-discovery-configmap.yaml.j2 | 9 + tests/templates/kuttl/logging/01-assert.yaml | 12 + .../01-install-druid-vector-aggregator.yaml | 17 + tests/templates/kuttl/logging/02-assert.yaml | 17 + .../kuttl/logging/02-install-zk.yaml.j2 | 37 ++ tests/templates/kuttl/logging/03-assert.yaml | 28 ++ .../kuttl/logging/03-install-hdfs.yaml.j2 | 41 +++ tests/templates/kuttl/logging/04-assert.yaml | 84 +++++ .../kuttl/logging/04-install-druid.yaml.j2 | 337 ++++++++++++++++++ tests/templates/kuttl/logging/05-assert.yaml | 12 + .../logging/05-install-druid-test-runner.yaml | 22 ++ tests/templates/kuttl/logging/06-assert.yaml | 7 + .../logging/06-test-log-aggregation.yaml | 6 + .../druid-vector-aggregator-values.yaml.j2 | 211 +++++++++++ .../kuttl/logging/test_log_aggregation.py | 41 +++ tests/test-definition.yaml | 17 +- 17 files changed, 902 insertions(+), 6 deletions(-) create mode 100644 tests/templates/kuttl/logging/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/logging/00-install-vector-aggregator-discovery-configmap.yaml.j2 create mode 100644 tests/templates/kuttl/logging/01-assert.yaml create mode 100644 tests/templates/kuttl/logging/01-install-druid-vector-aggregator.yaml create mode 100644 tests/templates/kuttl/logging/02-assert.yaml create mode 100644 tests/templates/kuttl/logging/02-install-zk.yaml.j2 create mode 100644 tests/templates/kuttl/logging/03-assert.yaml create mode 100644 tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 create mode 100644 tests/templates/kuttl/logging/04-assert.yaml create mode 100644 tests/templates/kuttl/logging/04-install-druid.yaml.j2 create mode 100644 tests/templates/kuttl/logging/05-assert.yaml create mode 100644 tests/templates/kuttl/logging/05-install-druid-test-runner.yaml create mode 100644 tests/templates/kuttl/logging/06-assert.yaml create mode 100644 tests/templates/kuttl/logging/06-test-log-aggregation.yaml create mode 100644 tests/templates/kuttl/logging/druid-vector-aggregator-values.yaml.j2 
create mode 100755 tests/templates/kuttl/logging/test_log_aggregation.py diff --git a/tests/templates/kuttl/logging/00-assert.yaml.j2 b/tests/templates/kuttl/logging/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/logging/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/logging/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/logging/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/logging/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/logging/01-assert.yaml b/tests/templates/kuttl/logging/01-assert.yaml new file mode 100644 index 00000000..291d6e25 --- /dev/null +++ b/tests/templates/kuttl/logging/01-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-vector-aggregator +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/logging/01-install-druid-vector-aggregator.yaml b/tests/templates/kuttl/logging/01-install-druid-vector-aggregator.yaml new file mode 100644 index 00000000..ff1d0187 --- /dev/null +++ b/tests/templates/kuttl/logging/01-install-druid-vector-aggregator.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: >- + helm install druid-vector-aggregator vector + --namespace $NAMESPACE + --version 0.19.2 + --repo 
https://helm.vector.dev + --values druid-vector-aggregator-values.yaml +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: druid-vector-aggregator-discovery +data: + ADDRESS: druid-vector-aggregator:6123 diff --git a/tests/templates/kuttl/logging/02-assert.yaml b/tests/templates/kuttl/logging/02-assert.yaml new file mode 100644 index 00000000..4998bcdd --- /dev/null +++ b/tests/templates/kuttl/logging/02-assert.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-zk-server-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: hdfs-znode diff --git a/tests/templates/kuttl/logging/02-install-zk.yaml.j2 b/tests/templates/kuttl/logging/02-install-zk.yaml.j2 new file mode 100644 index 00000000..a6a17bc5 --- /dev/null +++ b/tests/templates/kuttl/logging/02-install-zk.yaml.j2 @@ -0,0 +1,37 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: druid-zk +spec: + image: + productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" + stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: druid-znode +spec: + clusterRef: + name: druid-zk +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: hdfs-znode +spec: + clusterRef: + name: druid-zk diff --git a/tests/templates/kuttl/logging/03-assert.yaml b/tests/templates/kuttl/logging/03-assert.yaml new file mode 100644 index 00000000..f486d011 
--- /dev/null +++ b/tests/templates/kuttl/logging/03-assert.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-hdfs-namenode-default +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-hdfs-journalnode-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-hdfs-datanode-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 b/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 new file mode 100644 index 00000000..63ab44e3 --- /dev/null +++ b/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 @@ -0,0 +1,41 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +metadata: + name: druid-hdfs +timeout: 600 +--- +apiVersion: hdfs.stackable.tech/v1alpha1 +kind: HdfsCluster +metadata: + name: druid-hdfs +spec: + image: + productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" + stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" + zookeeperConfigMapName: hdfs-znode +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + dfsReplication: 1 + nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 2 + dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 + journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 diff --git a/tests/templates/kuttl/logging/04-assert.yaml b/tests/templates/kuttl/logging/04-assert.yaml new file mode 100644 index 00000000..24a89f8f --- /dev/null +++ 
b/tests/templates/kuttl/logging/04-assert.yaml @@ -0,0 +1,84 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-broker-automatic-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-broker-custom-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-coordinator-automatic-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-coordinator-custom-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-historical-automatic-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-historical-custom-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-middlemanager-automatic-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-middlemanager-custom-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-router-automatic-log-config +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-druid-router-custom-log-config +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/logging/04-install-druid.yaml.j2 b/tests/templates/kuttl/logging/04-install-druid.yaml.j2 new file mode 100644 index 00000000..d7aa6fd3 --- /dev/null +++ b/tests/templates/kuttl/logging/04-install-druid.yaml.j2 @@ -0,0 +1,337 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +metadata: + name: install-druid +timeout: 600 +--- +apiVersion: 
druid.stackable.tech/v1alpha1 +kind: DruidCluster +metadata: + name: test-druid +spec: + image: + productVersion: "{{ test_scenario['values']['druid'].split('-stackable')[0] }}" + stackableVersion: "{{ test_scenario['values']['druid'].split('-stackable')[1] }}" + clusterConfig: + deepStorage: + hdfs: + configMapName: druid-hdfs + directory: /druid + metadataStorageDatabase: + dbType: derby + connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true + host: localhost + port: 1527 + vectorAggregatorConfigMapName: druid-vector-aggregator-discovery + zookeeperConfigMapName: druid-znode + brokers: + roleGroups: + automatic-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + custom-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + coordinators: + roleGroups: + automatic-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + custom-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + 
level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + historicals: + roleGroups: + automatic-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + custom-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + middleManagers: + roleGroups: + automatic-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + custom-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + routers: + roleGroups: + automatic-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: 
INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + custom-log-config: + replicas: 1 + config: + logging: + enableVectorAgent: true + containers: + druid: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + prepare: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO + vector: + console: + level: INFO + file: + level: INFO + loggers: + ROOT: + level: INFO diff --git a/tests/templates/kuttl/logging/05-assert.yaml b/tests/templates/kuttl/logging/05-assert.yaml new file mode 100644 index 00000000..1e03d73b --- /dev/null +++ b/tests/templates/kuttl/logging/05-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-test-runner +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/logging/05-install-druid-test-runner.yaml b/tests/templates/kuttl/logging/05-install-druid-test-runner.yaml new file mode 100644 index 00000000..85f3c5f0 --- /dev/null +++ b/tests/templates/kuttl/logging/05-install-druid-test-runner.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-test-runner + labels: + app: druid-test-runner +spec: + replicas: 1 + selector: + matchLabels: + app: druid-test-runner + template: + metadata: + labels: + app: druid-test-runner + spec: + containers: + - name: druid-test-runner + image: docker.stackable.tech/stackable/testing-tools:0.1.0-stackable0.1.0 + stdin: true + tty: true diff --git a/tests/templates/kuttl/logging/06-assert.yaml b/tests/templates/kuttl/logging/06-assert.yaml new file mode 100644 index 00000000..36441c1d --- /dev/null +++ b/tests/templates/kuttl/logging/06-assert.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: >- + kubectl exec --namespace=$NAMESPACE druid-test-runner-0 -- + python /tmp/test_log_aggregation.py -n $NAMESPACE diff --git 
a/tests/templates/kuttl/logging/06-test-log-aggregation.yaml b/tests/templates/kuttl/logging/06-test-log-aggregation.yaml new file mode 100644 index 00000000..bba950db --- /dev/null +++ b/tests/templates/kuttl/logging/06-test-log-aggregation.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + kubectl cp ./test_log_aggregation.py $NAMESPACE/druid-test-runner-0:/tmp diff --git a/tests/templates/kuttl/logging/druid-vector-aggregator-values.yaml.j2 b/tests/templates/kuttl/logging/druid-vector-aggregator-values.yaml.j2 new file mode 100644 index 00000000..d362970f --- /dev/null +++ b/tests/templates/kuttl/logging/druid-vector-aggregator-values.yaml.j2 @@ -0,0 +1,211 @@ +--- +role: Aggregator +service: + ports: + - name: api + port: 8686 + protocol: TCP + targetPort: 8686 + - name: vector + port: 6123 + protocol: TCP + targetPort: 6000 +customConfig: + api: + address: 0.0.0.0:8686 + enabled: true + sources: + vector: + address: 0.0.0.0:6000 + type: vector + version: "2" + transforms: + automaticLogConfigBrokerDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-automatic-log-config-0" && + .container == "druid" + automaticLogConfigBrokerPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-automatic-log-config-0" && + .container == "prepare" + automaticLogConfigBrokerVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-automatic-log-config-0" && + .container == "vector" + customLogConfigBrokerDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-custom-log-config-0" && + .container == "druid" + customLogConfigBrokerPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-custom-log-config-0" && + .container == "prepare" + customLogConfigBrokerVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-broker-custom-log-config-0" && + 
.container == "vector" + automaticLogConfigCoordinatorDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-automatic-log-config-0" && + .container == "druid" + automaticLogConfigCoordinatorPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-automatic-log-config-0" && + .container == "prepare" + automaticLogConfigCoordinatorVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-automatic-log-config-0" && + .container == "vector" + customLogConfigCoordinatorDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-custom-log-config-0" && + .container == "druid" + customLogConfigCoordinatorPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-custom-log-config-0" && + .container == "prepare" + customLogConfigCoordinatorVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-coordinator-custom-log-config-0" && + .container == "vector" + automaticLogConfigHistoricalDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-historical-automatic-log-config-0" && + .container == "druid" + automaticLogConfigHistoricalPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-historical-automatic-log-config-0" && + .container == "prepare" + automaticLogConfigHistoricalVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-historical-automatic-log-config-0" && + .container == "vector" + customLogConfigHistoricalDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-historical-custom-log-config-0" && + .container == "druid" + customLogConfigHistoricalPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-historical-custom-log-config-0" && + .container == "prepare" + customLogConfigHistoricalVector: + type: filter + inputs: [vector] + condition: >- + .pod 
== "test-druid-historical-custom-log-config-0" && + .container == "vector" + automaticLogConfigMiddlemanagerDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-automatic-log-config-0" && + .container == "druid" + automaticLogConfigMiddlemanagerPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-automatic-log-config-0" && + .container == "prepare" + automaticLogConfigMiddlemanagerVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-automatic-log-config-0" && + .container == "vector" + customLogConfigMiddlemanagerDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-custom-log-config-0" && + .container == "druid" + customLogConfigMiddlemanagerPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-custom-log-config-0" && + .container == "prepare" + customLogConfigMiddlemanagerVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-middlemanager-custom-log-config-0" && + .container == "vector" + automaticLogConfigRouterDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-router-automatic-log-config-0" && + .container == "druid" + automaticLogConfigRouterPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-router-automatic-log-config-0" && + .container == "prepare" + automaticLogConfigRouterVector: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-router-automatic-log-config-0" && + .container == "vector" + customLogConfigRouterDruid: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-router-custom-log-config-0" && + .container == "druid" + customLogConfigRouterPrepare: + type: filter + inputs: [vector] + condition: >- + .pod == "test-druid-router-custom-log-config-0" && + .container == "prepare" + customLogConfigRouterVector: + type: filter + inputs: 
[vector] + condition: >- + .pod == "test-druid-router-custom-log-config-0" && + .container == "vector" + sinks: + out: + inputs: [automaticLogConfig*, customLogConfig*] +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + type: vector + address: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% else %} + type: blackhole +{% endif %} diff --git a/tests/templates/kuttl/logging/test_log_aggregation.py b/tests/templates/kuttl/logging/test_log_aggregation.py new file mode 100755 index 00000000..b8ecf18a --- /dev/null +++ b/tests/templates/kuttl/logging/test_log_aggregation.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 +import requests + + +def check_processed_events(): + response = requests.post( + 'http://druid-vector-aggregator:8686/graphql', + json={ + 'query': """ + { + transforms(first:100) { + nodes { + componentId + metrics { + processedEventsTotal { + processedEventsTotal + } + } + } + } + } + """ + } + ) + + assert response.status_code == 200, \ + 'Cannot access the API of the vector aggregator.' + + result = response.json() + + transforms = result['data']['transforms']['nodes'] + for transform in transforms: + processedEvents = transform['metrics']['processedEventsTotal']['processedEventsTotal'] + componentId = transform['componentId'] + assert processedEvents > 0, \ + f'No events were processed in "{componentId}".' 
+ + +if __name__ == '__main__': + check_processed_events() + print('Test successful!') diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index ec5ee7c5..fd3df2fc 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -9,20 +9,20 @@ dimensions: - 24.0.0-stackable23.4.0-rc2 - name: zookeeper values: - - 3.7.0-stackable0.9.0 - - 3.8.0-stackable0.9.0 + - 3.7.0-stackable23.4.0-rc2 + - 3.8.0-stackable23.4.0-rc2 - name: zookeeper-latest values: - - 3.8.0-stackable0.8.0 + - 3.8.0-stackable23.4.0-rc2 - name: opa values: - - 0.45.0-stackable0.3.0 + - 0.45.0-stackable23.4.0-rc2 - name: hadoop values: - - 3.3.4-stackable0.3.0 + - 3.3.4-stackable23.4.0-rc1 - name: hadoop-latest values: - - 3.3.4-stackable0.3.0 + - 3.3.4-stackable23.4.0-rc1 - name: s3-use-tls values: - "true" @@ -94,3 +94,8 @@ tests: - hadoop-latest - ldap-use-tls - ldap-no-bind-credentials + - name: logging + dimensions: + - druid + - hadoop + - zookeeper-latest From c49233c0289485f7305f5a33b3f885cddeaad5ae Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:08:24 +0100 Subject: [PATCH 09/27] Enable log aggregation in authorizer test --- .../templates/kuttl/authorizer/00-assert.yaml | 17 --------- .../kuttl/authorizer/00-assert.yaml.j2 | 10 ++++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++++ .../templates/kuttl/authorizer/01-assert.yaml | 15 ++++++-- ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 +++++ .../templates/kuttl/authorizer/02-assert.yaml | 28 ++------------- ...all-opa.yaml.j2 => 02-install-opa.yaml.j2} | 0 .../templates/kuttl/authorizer/03-assert.yaml | 26 +++----------- ...l-hdfs.yaml.j2 => 03-install-hdfs.yaml.j2} | 12 +++++++ .../templates/kuttl/authorizer/04-assert.yaml | 36 +++++++++++++++++-- ...druid.yaml.j2 => 04-install-druid.yaml.j2} | 18 ++++++++++ .../templates/kuttl/authorizer/05-assert.yaml | 12 +++++-- ...ontainer.yaml => 05-checks-container.yaml} | 0 .../templates/kuttl/authorizer/06-assert.yaml | 6 ++++ 
.../{05-authcheck.yaml => 06-authcheck.yaml} | 0 .../kuttl/logging/03-install-hdfs.yaml.j2 | 2 +- 16 files changed, 128 insertions(+), 71 deletions(-) delete mode 100644 tests/templates/kuttl/authorizer/00-assert.yaml create mode 100644 tests/templates/kuttl/authorizer/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/authorizer/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/authorizer/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (71%) rename tests/templates/kuttl/authorizer/{01-install-opa.yaml.j2 => 02-install-opa.yaml.j2} (100%) rename tests/templates/kuttl/authorizer/{02-install-hdfs.yaml.j2 => 03-install-hdfs.yaml.j2} (58%) rename tests/templates/kuttl/authorizer/{03-install-druid.yaml.j2 => 04-install-druid.yaml.j2} (80%) rename tests/templates/kuttl/authorizer/{04-checks-container.yaml => 05-checks-container.yaml} (100%) create mode 100644 tests/templates/kuttl/authorizer/06-assert.yaml rename tests/templates/kuttl/authorizer/{05-authcheck.yaml => 06-authcheck.yaml} (100%) diff --git a/tests/templates/kuttl/authorizer/00-assert.yaml b/tests/templates/kuttl/authorizer/00-assert.yaml deleted file mode 100644 index 4998bcdd..00000000 --- a/tests/templates/kuttl/authorizer/00-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 300 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-zk-server-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: hdfs-znode diff --git a/tests/templates/kuttl/authorizer/00-assert.yaml.j2 b/tests/templates/kuttl/authorizer/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/authorizer/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% 
endif %} diff --git a/tests/templates/kuttl/authorizer/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/authorizer/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/authorizer/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/authorizer/01-assert.yaml b/tests/templates/kuttl/authorizer/01-assert.yaml index 7a81c130..4998bcdd 100644 --- a/tests/templates/kuttl/authorizer/01-assert.yaml +++ b/tests/templates/kuttl/authorizer/01-assert.yaml @@ -2,5 +2,16 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert timeout: 300 -commands: - - script: kubectl -n $NAMESPACE rollout status daemonset test-opa-server-default --timeout 300s +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-zk-server-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: hdfs-znode diff --git a/tests/templates/kuttl/authorizer/00-install-zk.yaml.j2 b/tests/templates/kuttl/authorizer/01-install-zk.yaml.j2 similarity index 71% rename from tests/templates/kuttl/authorizer/00-install-zk.yaml.j2 rename to tests/templates/kuttl/authorizer/01-install-zk.yaml.j2 index 9958d9fb..a6a17bc5 100644 --- a/tests/templates/kuttl/authorizer/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/authorizer/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% 
endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/authorizer/02-assert.yaml b/tests/templates/kuttl/authorizer/02-assert.yaml index 7138c1b0..7a81c130 100644 --- a/tests/templates/kuttl/authorizer/02-assert.yaml +++ b/tests/templates/kuttl/authorizer/02-assert.yaml @@ -1,28 +1,6 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 600 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 +timeout: 300 +commands: + - script: kubectl -n $NAMESPACE rollout status daemonset test-opa-server-default --timeout 300s diff --git a/tests/templates/kuttl/authorizer/01-install-opa.yaml.j2 b/tests/templates/kuttl/authorizer/02-install-opa.yaml.j2 similarity index 100% rename from tests/templates/kuttl/authorizer/01-install-opa.yaml.j2 rename to tests/templates/kuttl/authorizer/02-install-opa.yaml.j2 diff --git a/tests/templates/kuttl/authorizer/03-assert.yaml b/tests/templates/kuttl/authorizer/03-assert.yaml index a3331d5c..7138c1b0 100644 --- a/tests/templates/kuttl/authorizer/03-assert.yaml +++ b/tests/templates/kuttl/authorizer/03-assert.yaml @@ -6,31 +6,15 @@ timeout: 600 apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-historical-default -status: - readyReplicas: 1 - 
replicas: 1 + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-default + name: druid-hdfs-journalnode-default status: readyReplicas: 1 replicas: 1 @@ -38,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/authorizer/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/authorizer/03-install-hdfs.yaml.j2 similarity index 58% rename from tests/templates/kuttl/authorizer/02-install-hdfs.yaml.j2 rename to tests/templates/kuttl/authorizer/03-install-hdfs.yaml.j2 index 6f512cc9..851577fe 100644 --- a/tests/templates/kuttl/authorizer/02-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/authorizer/03-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: hdfs-znode dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/authorizer/04-assert.yaml b/tests/templates/kuttl/authorizer/04-assert.yaml index dc085bb1..a3331d5c 100644 --- a/tests/templates/kuttl/authorizer/04-assert.yaml +++ b/tests/templates/kuttl/authorizer/04-assert.yaml @@ -1,12 +1,44 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 300 +timeout: 600 --- apiVersion: 
apps/v1 kind: StatefulSet metadata: - name: checks + name: derby-druid-broker-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-coordinator-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-historical-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-middlemanager-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-router-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/authorizer/03-install-druid.yaml.j2 b/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 similarity index 80% rename from tests/templates/kuttl/authorizer/03-install-druid.yaml.j2 rename to tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 index b4fcc71a..51991451 100644 --- a/tests/templates/kuttl/authorizer/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/authorizer/04-install-druid.yaml.j2 @@ -27,8 +27,14 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} configOverrides: runtime.properties: &runtime-properties druid.auth.authenticatorChain: "[\"MyBasicMetadataAuthenticator\"]" @@ -56,24 +62,36 @@ spec: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} configOverrides: runtime.properties: *runtime-properties roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 
configOverrides: runtime.properties: *runtime-properties roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} configOverrides: runtime.properties: *runtime-properties roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} configOverrides: runtime.properties: *runtime-properties roleGroups: diff --git a/tests/templates/kuttl/authorizer/05-assert.yaml b/tests/templates/kuttl/authorizer/05-assert.yaml index 1f6e20c9..dc085bb1 100644 --- a/tests/templates/kuttl/authorizer/05-assert.yaml +++ b/tests/templates/kuttl/authorizer/05-assert.yaml @@ -1,6 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/authcheck.py derby-druid -timeout: 600 +timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: checks +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/authorizer/04-checks-container.yaml b/tests/templates/kuttl/authorizer/05-checks-container.yaml similarity index 100% rename from tests/templates/kuttl/authorizer/04-checks-container.yaml rename to tests/templates/kuttl/authorizer/05-checks-container.yaml diff --git a/tests/templates/kuttl/authorizer/06-assert.yaml b/tests/templates/kuttl/authorizer/06-assert.yaml new file mode 100644 index 00000000..1f6e20c9 --- /dev/null +++ b/tests/templates/kuttl/authorizer/06-assert.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/authcheck.py derby-druid +timeout: 600 diff --git a/tests/templates/kuttl/authorizer/05-authcheck.yaml b/tests/templates/kuttl/authorizer/06-authcheck.yaml similarity index 100% rename from tests/templates/kuttl/authorizer/05-authcheck.yaml rename to tests/templates/kuttl/authorizer/06-authcheck.yaml diff --git 
a/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 b/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 index 63ab44e3..851577fe 100644 --- a/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/logging/03-install-hdfs.yaml.j2 @@ -13,10 +13,10 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" - zookeeperConfigMapName: hdfs-znode {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} + zookeeperConfigMapName: hdfs-znode dfsReplication: 1 nameNodes: config: From ede7f9a6812abe7c2f754e8f16bf9560e9006f36 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:22:55 +0100 Subject: [PATCH 10/27] Enable log aggregation in hdfs-deep-storage test --- .../kuttl/hdfs-deep-storage/00-assert.yaml | 17 --------- .../kuttl/hdfs-deep-storage/00-assert.yaml.j2 | 10 ++++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++++ .../kuttl/hdfs-deep-storage/01-assert.yaml | 19 +++------- ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 +++++ .../kuttl/hdfs-deep-storage/02-assert.yaml | 26 +++----------- ...l-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} | 12 +++++++ .../kuttl/hdfs-deep-storage/03-assert.yaml | 36 +++++++++++++++++-- ...druid.yaml.j2 => 03-install-druid.yaml.j2} | 18 ++++++++++ .../kuttl/hdfs-deep-storage/04-assert.yaml | 10 ++++-- ...ontainer.yaml => 04-checks-container.yaml} | 0 .../kuttl/hdfs-deep-storage/05-assert.yaml | 2 +- ...4-healthcheck.yaml => 05-healthcheck.yaml} | 0 .../kuttl/hdfs-deep-storage/06-assert.yaml | 4 +-- ...stioncheck.yaml => 06-ingestioncheck.yaml} | 0 .../kuttl/hdfs-deep-storage/07-assert.yaml | 6 ++++ 16 files changed, 117 insertions(+), 60 deletions(-) delete mode 100644 tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml create mode 100644 tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml.j2 
create mode 100644 tests/templates/kuttl/hdfs-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/hdfs-deep-storage/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (66%) rename tests/templates/kuttl/hdfs-deep-storage/{01-install-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} (63%) rename tests/templates/kuttl/hdfs-deep-storage/{02-install-druid.yaml.j2 => 03-install-druid.yaml.j2} (60%) rename tests/templates/kuttl/hdfs-deep-storage/{03-checks-container.yaml => 04-checks-container.yaml} (100%) rename tests/templates/kuttl/hdfs-deep-storage/{04-healthcheck.yaml => 05-healthcheck.yaml} (100%) rename tests/templates/kuttl/hdfs-deep-storage/{05-ingestioncheck.yaml => 06-ingestioncheck.yaml} (100%) create mode 100644 tests/templates/kuttl/hdfs-deep-storage/07-assert.yaml diff --git a/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml deleted file mode 100644 index 01aec7bb..00000000 --- a/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 600 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-zk-server-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: druid-znode diff --git a/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml.j2 b/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/hdfs-deep-storage/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/hdfs-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 
b/tests/templates/kuttl/hdfs-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/hdfs-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/hdfs-deep-storage/01-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/01-assert.yaml index 7138c1b0..01aec7bb 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/01-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/01-assert.yaml @@ -6,23 +6,12 @@ timeout: 600 apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default + name: druid-zk-server-default status: readyReplicas: 1 replicas: 1 --- -apiVersion: apps/v1 -kind: StatefulSet +apiVersion: v1 +kind: ConfigMap metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 + name: druid-znode diff --git a/tests/templates/kuttl/hdfs-deep-storage/00-install-zk.yaml.j2 b/tests/templates/kuttl/hdfs-deep-storage/01-install-zk.yaml.j2 similarity index 66% rename from tests/templates/kuttl/hdfs-deep-storage/00-install-zk.yaml.j2 rename to tests/templates/kuttl/hdfs-deep-storage/01-install-zk.yaml.j2 index e3f705b9..9b6e4d5b 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/hdfs-deep-storage/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + 
clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/02-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/02-assert.yaml index a3331d5c..7138c1b0 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/02-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/02-assert.yaml @@ -6,31 +6,15 @@ timeout: 600 apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-historical-default -status: - readyReplicas: 1 - replicas: 1 + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-default + name: druid-hdfs-journalnode-default status: readyReplicas: 1 replicas: 1 @@ -38,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/01-install-hdfs.yaml.j2 b/tests/templates/kuttl/hdfs-deep-storage/02-install-hdfs.yaml.j2 similarity index 63% rename from tests/templates/kuttl/hdfs-deep-storage/01-install-hdfs.yaml.j2 rename to tests/templates/kuttl/hdfs-deep-storage/02-install-hdfs.yaml.j2 index 6d50b981..025841e7 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/01-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/hdfs-deep-storage/02-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ 
test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-hdfs-znode dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/03-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/03-assert.yaml index dc085bb1..a3331d5c 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/03-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/03-assert.yaml @@ -1,12 +1,44 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 300 +timeout: 600 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: checks + name: derby-druid-broker-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-coordinator-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-historical-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-middlemanager-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-router-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/02-install-druid.yaml.j2 b/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 similarity index 60% rename from tests/templates/kuttl/hdfs-deep-storage/02-install-druid.yaml.j2 rename to 
tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 index 1de84747..1c0964ae 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/02-install-druid.yaml.j2 +++ b/tests/templates/kuttl/hdfs-deep-storage/03-install-druid.yaml.j2 @@ -23,24 +23,42 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/04-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/04-assert.yaml index 07a25600..dc085bb1 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/04-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/04-assert.yaml @@ -1,6 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: checks +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/hdfs-deep-storage/03-checks-container.yaml b/tests/templates/kuttl/hdfs-deep-storage/04-checks-container.yaml similarity index 100% rename from 
tests/templates/kuttl/hdfs-deep-storage/03-checks-container.yaml rename to tests/templates/kuttl/hdfs-deep-storage/04-checks-container.yaml diff --git a/tests/templates/kuttl/hdfs-deep-storage/05-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/05-assert.yaml index 1e955ca3..07a25600 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/05-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/05-assert.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 diff --git a/tests/templates/kuttl/hdfs-deep-storage/04-healthcheck.yaml b/tests/templates/kuttl/hdfs-deep-storage/05-healthcheck.yaml similarity index 100% rename from tests/templates/kuttl/hdfs-deep-storage/04-healthcheck.yaml rename to tests/templates/kuttl/hdfs-deep-storage/05-healthcheck.yaml diff --git a/tests/templates/kuttl/hdfs-deep-storage/06-assert.yaml b/tests/templates/kuttl/hdfs-deep-storage/06-assert.yaml index c0a4ccb4..1e955ca3 100644 --- a/tests/templates/kuttl/hdfs-deep-storage/06-assert.yaml +++ b/tests/templates/kuttl/hdfs-deep-storage/06-assert.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert commands: - - script: kubectl exec -n $NAMESPACE druid-hdfs-namenode-default-0 -- ./bin/hdfs dfs -ls /druid | grep -q /druid/wikipedia-2015-09-12 -timeout: 30 + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid +timeout: 300 diff --git a/tests/templates/kuttl/hdfs-deep-storage/05-ingestioncheck.yaml b/tests/templates/kuttl/hdfs-deep-storage/06-ingestioncheck.yaml similarity index 100% rename from tests/templates/kuttl/hdfs-deep-storage/05-ingestioncheck.yaml rename to tests/templates/kuttl/hdfs-deep-storage/06-ingestioncheck.yaml diff --git a/tests/templates/kuttl/hdfs-deep-storage/07-assert.yaml 
b/tests/templates/kuttl/hdfs-deep-storage/07-assert.yaml new file mode 100644 index 00000000..c0a4ccb4 --- /dev/null +++ b/tests/templates/kuttl/hdfs-deep-storage/07-assert.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: kubectl exec -n $NAMESPACE druid-hdfs-namenode-default-0 -- ./bin/hdfs dfs -ls /druid | grep -q /druid/wikipedia-2015-09-12 +timeout: 30 From 3a1c662f377c20bca6aaa91958cfca684945713f Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:30:53 +0100 Subject: [PATCH 11/27] Enable log aggregation in ingestion-no-s3-ext test --- .../kuttl/ingestion-no-s3-ext/00-assert.yaml | 17 --------- .../ingestion-no-s3-ext/00-assert.yaml.j2 | 10 ++++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++++ .../kuttl/ingestion-no-s3-ext/01-assert.yaml | 21 +++-------- ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 +++++ .../kuttl/ingestion-no-s3-ext/02-assert.yaml | 26 +++----------- ...l-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} | 12 +++++++ .../kuttl/ingestion-no-s3-ext/03-assert.yaml | 36 +++++++++++++++++-- ...druid.yaml.j2 => 03-install-druid.yaml.j2} | 18 ++++++++++ .../kuttl/ingestion-no-s3-ext/04-assert.yaml | 10 ++++-- ...ontainer.yaml => 04-checks-container.yaml} | 0 .../kuttl/ingestion-no-s3-ext/05-assert.yaml | 2 +- ...4-healthcheck.yaml => 05-healthcheck.yaml} | 0 .../kuttl/ingestion-no-s3-ext/06-assert.yaml | 6 ++++ ...stioncheck.yaml => 06-ingestioncheck.yaml} | 0 15 files changed, 116 insertions(+), 59 deletions(-) delete mode 100644 tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml create mode 100644 tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/ingestion-no-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/ingestion-no-s3-ext/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (71%) rename tests/templates/kuttl/ingestion-no-s3-ext/{01-install-hdfs.yaml.j2 => 
02-install-hdfs.yaml.j2} (58%) rename tests/templates/kuttl/ingestion-no-s3-ext/{02-install-druid.yaml.j2 => 03-install-druid.yaml.j2} (60%) rename tests/templates/kuttl/ingestion-no-s3-ext/{03-checks-container.yaml => 04-checks-container.yaml} (100%) rename tests/templates/kuttl/ingestion-no-s3-ext/{04-healthcheck.yaml => 05-healthcheck.yaml} (100%) create mode 100644 tests/templates/kuttl/ingestion-no-s3-ext/06-assert.yaml rename tests/templates/kuttl/ingestion-no-s3-ext/{05-ingestioncheck.yaml => 06-ingestioncheck.yaml} (100%) diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml deleted file mode 100644 index 4998bcdd..00000000 --- a/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 300 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-zk-server-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: hdfs-znode diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/ingestion-no-s3-ext/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/ingestion-no-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/01-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/01-assert.yaml index 7138c1b0..4998bcdd 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/01-assert.yaml +++ b/tests/templates/kuttl/ingestion-no-s3-ext/01-assert.yaml @@ -1,28 +1,17 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 600 +timeout: 300 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default + name: druid-zk-server-default status: readyReplicas: 1 replicas: 1 --- -apiVersion: apps/v1 -kind: StatefulSet +apiVersion: v1 +kind: ConfigMap metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 + name: hdfs-znode diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/00-install-zk.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/01-install-zk.yaml.j2 similarity index 71% rename from tests/templates/kuttl/ingestion-no-s3-ext/00-install-zk.yaml.j2 rename to tests/templates/kuttl/ingestion-no-s3-ext/01-install-zk.yaml.j2 index 9958d9fb..a6a17bc5 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/ingestion-no-s3-ext/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 
diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/02-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/02-assert.yaml index a3331d5c..7138c1b0 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/02-assert.yaml +++ b/tests/templates/kuttl/ingestion-no-s3-ext/02-assert.yaml @@ -6,31 +6,15 @@ timeout: 600 apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-historical-default -status: - readyReplicas: 1 - replicas: 1 + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-default + name: druid-hdfs-journalnode-default status: readyReplicas: 1 replicas: 1 @@ -38,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/01-install-hdfs.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/02-install-hdfs.yaml.j2 similarity index 58% rename from tests/templates/kuttl/ingestion-no-s3-ext/01-install-hdfs.yaml.j2 rename to tests/templates/kuttl/ingestion-no-s3-ext/02-install-hdfs.yaml.j2 index 6f512cc9..851577fe 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/01-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/ingestion-no-s3-ext/02-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: hdfs-znode 
dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/03-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/03-assert.yaml index dc085bb1..a3331d5c 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/03-assert.yaml +++ b/tests/templates/kuttl/ingestion-no-s3-ext/03-assert.yaml @@ -1,12 +1,44 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 300 +timeout: 600 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: checks + name: derby-druid-broker-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-coordinator-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-historical-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-middlemanager-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-router-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/02-install-druid.yaml.j2 b/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 similarity index 60% rename from tests/templates/kuttl/ingestion-no-s3-ext/02-install-druid.yaml.j2 rename to tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 index 1de84747..1c0964ae 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/02-install-druid.yaml.j2 +++ 
b/tests/templates/kuttl/ingestion-no-s3-ext/03-install-druid.yaml.j2 @@ -23,24 +23,42 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/04-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/04-assert.yaml index 07a25600..dc085bb1 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/04-assert.yaml +++ b/tests/templates/kuttl/ingestion-no-s3-ext/04-assert.yaml @@ -1,6 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: checks +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/03-checks-container.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/04-checks-container.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-no-s3-ext/03-checks-container.yaml rename to tests/templates/kuttl/ingestion-no-s3-ext/04-checks-container.yaml diff --git 
a/tests/templates/kuttl/ingestion-no-s3-ext/05-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/05-assert.yaml index 1e955ca3..07a25600 100644 --- a/tests/templates/kuttl/ingestion-no-s3-ext/05-assert.yaml +++ b/tests/templates/kuttl/ingestion-no-s3-ext/05-assert.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/04-healthcheck.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/05-healthcheck.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-no-s3-ext/04-healthcheck.yaml rename to tests/templates/kuttl/ingestion-no-s3-ext/05-healthcheck.yaml diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/06-assert.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/06-assert.yaml new file mode 100644 index 00000000..1e955ca3 --- /dev/null +++ b/tests/templates/kuttl/ingestion-no-s3-ext/06-assert.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid +timeout: 300 diff --git a/tests/templates/kuttl/ingestion-no-s3-ext/05-ingestioncheck.yaml b/tests/templates/kuttl/ingestion-no-s3-ext/06-ingestioncheck.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-no-s3-ext/05-ingestioncheck.yaml rename to tests/templates/kuttl/ingestion-no-s3-ext/06-ingestioncheck.yaml From 5f17132c90695a4890cc95cd449f327528d9dfce Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:39:22 +0100 Subject: [PATCH 12/27] Enable log aggregation in ingestion-s3-ext test --- .../kuttl/ingestion-s3-ext/00-assert.yaml | 17 --------- .../kuttl/ingestion-s3-ext/00-assert.yaml.j2 | 10 ++++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++++ 
.../kuttl/ingestion-s3-ext/01-assert.yaml | 21 +++-------- ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 +++++ .../kuttl/ingestion-s3-ext/02-assert.yaml | 26 +++----------- ...l-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} | 12 +++++++ .../kuttl/ingestion-s3-ext/03-assert.yaml | 36 +++++++++++++++++-- ...druid.yaml.j2 => 03-install-druid.yaml.j2} | 18 ++++++++++ .../kuttl/ingestion-s3-ext/04-assert.yaml | 10 ++++-- ...ontainer.yaml => 04-checks-container.yaml} | 0 .../kuttl/ingestion-s3-ext/05-assert.yaml | 2 +- ...4-healthcheck.yaml => 05-healthcheck.yaml} | 0 .../kuttl/ingestion-s3-ext/06-assert.yaml | 6 ++++ ...stioncheck.yaml => 06-ingestioncheck.yaml} | 0 15 files changed, 116 insertions(+), 59 deletions(-) delete mode 100644 tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml create mode 100644 tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/ingestion-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/ingestion-s3-ext/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (71%) rename tests/templates/kuttl/ingestion-s3-ext/{01-install-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} (58%) rename tests/templates/kuttl/ingestion-s3-ext/{02-install-druid.yaml.j2 => 03-install-druid.yaml.j2} (71%) rename tests/templates/kuttl/ingestion-s3-ext/{03-checks-container.yaml => 04-checks-container.yaml} (100%) rename tests/templates/kuttl/ingestion-s3-ext/{04-healthcheck.yaml => 05-healthcheck.yaml} (100%) create mode 100644 tests/templates/kuttl/ingestion-s3-ext/06-assert.yaml rename tests/templates/kuttl/ingestion-s3-ext/{05-ingestioncheck.yaml => 06-ingestioncheck.yaml} (100%) diff --git a/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml deleted file mode 100644 index 4998bcdd..00000000 --- a/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert 
-timeout: 300 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-zk-server-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: hdfs-znode diff --git a/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/ingestion-s3-ext/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/ingestion-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/ingestion-s3-ext/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/ingestion-s3-ext/01-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/01-assert.yaml index 7138c1b0..4998bcdd 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/01-assert.yaml +++ b/tests/templates/kuttl/ingestion-s3-ext/01-assert.yaml @@ -1,28 +1,17 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 600 +timeout: 300 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default + name: druid-zk-server-default status: readyReplicas: 1 replicas: 1 --- -apiVersion: apps/v1 -kind: StatefulSet +apiVersion: v1 +kind: 
ConfigMap metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 + name: hdfs-znode diff --git a/tests/templates/kuttl/ingestion-s3-ext/00-install-zk.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/01-install-zk.yaml.j2 similarity index 71% rename from tests/templates/kuttl/ingestion-s3-ext/00-install-zk.yaml.j2 rename to tests/templates/kuttl/ingestion-s3-ext/01-install-zk.yaml.j2 index 9958d9fb..a6a17bc5 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/ingestion-s3-ext/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/02-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/02-assert.yaml index a3331d5c..7138c1b0 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/02-assert.yaml +++ b/tests/templates/kuttl/ingestion-s3-ext/02-assert.yaml @@ -6,31 +6,15 @@ timeout: 600 apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-historical-default -status: - readyReplicas: 1 - replicas: 1 + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-default + name: druid-hdfs-journalnode-default status: 
readyReplicas: 1 replicas: 1 @@ -38,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/01-install-hdfs.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/02-install-hdfs.yaml.j2 similarity index 58% rename from tests/templates/kuttl/ingestion-s3-ext/01-install-hdfs.yaml.j2 rename to tests/templates/kuttl/ingestion-s3-ext/02-install-hdfs.yaml.j2 index 6f512cc9..851577fe 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/01-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/ingestion-s3-ext/02-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: hdfs-znode dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/03-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/03-assert.yaml index dc085bb1..a3331d5c 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/03-assert.yaml +++ b/tests/templates/kuttl/ingestion-s3-ext/03-assert.yaml @@ -1,12 +1,44 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 300 +timeout: 600 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: checks + name: derby-druid-broker-default +status: + readyReplicas: 1 + replicas: 1 
+--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-coordinator-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-historical-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-middlemanager-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-router-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/02-install-druid.yaml.j2 b/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 similarity index 71% rename from tests/templates/kuttl/ingestion-s3-ext/02-install-druid.yaml.j2 rename to tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 index 4e6ba1da..442bc8ce 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/02-install-druid.yaml.j2 +++ b/tests/templates/kuttl/ingestion-s3-ext/03-install-druid.yaml.j2 @@ -29,24 +29,42 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: 
default: replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/04-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/04-assert.yaml index 07a25600..dc085bb1 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/04-assert.yaml +++ b/tests/templates/kuttl/ingestion-s3-ext/04-assert.yaml @@ -1,6 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: checks +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/ingestion-s3-ext/03-checks-container.yaml b/tests/templates/kuttl/ingestion-s3-ext/04-checks-container.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-s3-ext/03-checks-container.yaml rename to tests/templates/kuttl/ingestion-s3-ext/04-checks-container.yaml diff --git a/tests/templates/kuttl/ingestion-s3-ext/05-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/05-assert.yaml index 1e955ca3..07a25600 100644 --- a/tests/templates/kuttl/ingestion-s3-ext/05-assert.yaml +++ b/tests/templates/kuttl/ingestion-s3-ext/05-assert.yaml @@ -2,5 +2,5 @@ apiVersion: kuttl.dev/v1beta1 kind: TestAssert commands: - - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/healthcheck.py derby-druid timeout: 300 diff --git a/tests/templates/kuttl/ingestion-s3-ext/04-healthcheck.yaml b/tests/templates/kuttl/ingestion-s3-ext/05-healthcheck.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-s3-ext/04-healthcheck.yaml rename to tests/templates/kuttl/ingestion-s3-ext/05-healthcheck.yaml diff --git a/tests/templates/kuttl/ingestion-s3-ext/06-assert.yaml b/tests/templates/kuttl/ingestion-s3-ext/06-assert.yaml new file mode 100644 index 00000000..1e955ca3 --- /dev/null +++ b/tests/templates/kuttl/ingestion-s3-ext/06-assert.yaml @@ -0,0 
+1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +commands: + - script: kubectl exec -n $NAMESPACE checks-0 -- python /tmp/ingestioncheck.py derby-druid +timeout: 300 diff --git a/tests/templates/kuttl/ingestion-s3-ext/05-ingestioncheck.yaml b/tests/templates/kuttl/ingestion-s3-ext/06-ingestioncheck.yaml similarity index 100% rename from tests/templates/kuttl/ingestion-s3-ext/05-ingestioncheck.yaml rename to tests/templates/kuttl/ingestion-s3-ext/06-ingestioncheck.yaml From 3cd0edb67739006df46cf6f19244543afc238a2d Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:47:43 +0100 Subject: [PATCH 13/27] Enable log aggregation in ldap-authentication test --- .../ldap-authentication/00-assert.yaml.j2 | 10 ++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 ++++ .../{00-assert.yaml => 01-assert.yaml} | 0 ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 ++++ .../02-install-hdfs.yaml.j2 | 46 ++++++++++++------- .../05-install-druid.yaml.j2 | 18 ++++++++ 6 files changed, 74 insertions(+), 17 deletions(-) create mode 100644 tests/templates/kuttl/ldap-authentication/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/ldap-authentication/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/ldap-authentication/{00-assert.yaml => 01-assert.yaml} (100%) rename tests/templates/kuttl/ldap-authentication/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (71%) diff --git a/tests/templates/kuttl/ldap-authentication/00-assert.yaml.j2 b/tests/templates/kuttl/ldap-authentication/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/ldap-authentication/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git 
a/tests/templates/kuttl/ldap-authentication/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/ldap-authentication/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/ldap-authentication/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/ldap-authentication/00-assert.yaml b/tests/templates/kuttl/ldap-authentication/01-assert.yaml similarity index 100% rename from tests/templates/kuttl/ldap-authentication/00-assert.yaml rename to tests/templates/kuttl/ldap-authentication/01-assert.yaml diff --git a/tests/templates/kuttl/ldap-authentication/00-install-zk.yaml.j2 b/tests/templates/kuttl/ldap-authentication/01-install-zk.yaml.j2 similarity index 71% rename from tests/templates/kuttl/ldap-authentication/00-install-zk.yaml.j2 rename to tests/templates/kuttl/ldap-authentication/01-install-zk.yaml.j2 index 9958d9fb..a6a17bc5 100644 --- a/tests/templates/kuttl/ldap-authentication/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/ldap-authentication/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/ldap-authentication/02-install-hdfs.yaml.j2 
b/tests/templates/kuttl/ldap-authentication/02-install-hdfs.yaml.j2 index d3729bca..bba42532 100644 --- a/tests/templates/kuttl/ldap-authentication/02-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/ldap-authentication/02-install-hdfs.yaml.j2 @@ -10,20 +10,32 @@ kind: HdfsCluster metadata: name: druid-hdfs spec: - image: - productVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[0] }}" - stackableVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[1] }}" - zookeeperConfigMapName: hdfs-znode - dfsReplication: 1 - nameNodes: - roleGroups: - default: - replicas: 2 - dataNodes: - roleGroups: - default: - replicas: 1 - journalNodes: - roleGroups: - default: - replicas: 1 + image: + productVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[0] }}" + stackableVersion: "{{ test_scenario['values']['hadoop-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + zookeeperConfigMapName: hdfs-znode + dfsReplication: 1 + nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 2 + dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 + journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 diff --git a/tests/templates/kuttl/ldap-authentication/05-install-druid.yaml.j2 b/tests/templates/kuttl/ldap-authentication/05-install-druid.yaml.j2 index 8d4ce06c..f7d5744e 100644 --- a/tests/templates/kuttl/ldap-authentication/05-install-druid.yaml.j2 +++ b/tests/templates/kuttl/ldap-authentication/05-install-druid.yaml.j2 @@ -72,24 +72,42 @@ spec: port: 1527 tls: serverAndInternalSecretClass: null +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + 
vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 From 35ccdba52dacccf7cba9eeaf9ecd000737d05042 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 15:56:40 +0100 Subject: [PATCH 14/27] Enable log aggregation in orphaned-resources test --- .../kuttl/orphaned-resources/00-assert.yaml | 17 ------- .../orphaned-resources/00-assert.yaml.j2 | 10 +++++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 ++++ .../kuttl/orphaned-resources/01-assert.yaml | 19 ++------ ...stall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 ++++ .../kuttl/orphaned-resources/02-assert.yaml | 28 +++--------- ...l-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} | 12 +++++ .../kuttl/orphaned-resources/03-assert.yaml | 12 ++--- ...druid.yaml.j2 => 03-install-druid.yaml.j2} | 18 ++++++++ .../kuttl/orphaned-resources/04-assert.yaml | 44 +++++++++++++++++++ .../{03-errors.yaml => 04-errors.yaml} | 0 ...-druid.yaml.j2 => 04-update-druid.yaml.j2} | 18 ++++++++ 12 files changed, 135 insertions(+), 60 deletions(-) delete mode 100644 tests/templates/kuttl/orphaned-resources/00-assert.yaml create mode 100644 tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2 create mode 100644
tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/orphaned-resources/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (71%) rename tests/templates/kuttl/orphaned-resources/{01-install-hdfs.yaml.j2 => 02-install-hdfs.yaml.j2} (58%) rename tests/templates/kuttl/orphaned-resources/{02-install-druid.yaml.j2 => 03-install-druid.yaml.j2} (60%) create mode 100644 tests/templates/kuttl/orphaned-resources/04-assert.yaml rename tests/templates/kuttl/orphaned-resources/{03-errors.yaml => 04-errors.yaml} (100%) rename tests/templates/kuttl/orphaned-resources/{03-update-druid.yaml.j2 => 04-update-druid.yaml.j2} (62%) diff --git a/tests/templates/kuttl/orphaned-resources/00-assert.yaml b/tests/templates/kuttl/orphaned-resources/00-assert.yaml deleted file mode 100644 index 4998bcdd..00000000 --- a/tests/templates/kuttl/orphaned-resources/00-assert.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 300 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-zk-server-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: hdfs-znode diff --git a/tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2 b/tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/orphaned-resources/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ 
b/tests/templates/kuttl/orphaned-resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/orphaned-resources/01-assert.yaml b/tests/templates/kuttl/orphaned-resources/01-assert.yaml index f486d011..4998bcdd 100644 --- a/tests/templates/kuttl/orphaned-resources/01-assert.yaml +++ b/tests/templates/kuttl/orphaned-resources/01-assert.yaml @@ -6,23 +6,12 @@ timeout: 300 apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default + name: druid-zk-server-default status: readyReplicas: 1 replicas: 1 --- -apiVersion: apps/v1 -kind: StatefulSet +apiVersion: v1 +kind: ConfigMap metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 + name: hdfs-znode diff --git a/tests/templates/kuttl/orphaned-resources/00-install-zk.yaml.j2 b/tests/templates/kuttl/orphaned-resources/01-install-zk.yaml.j2 similarity index 71% rename from tests/templates/kuttl/orphaned-resources/00-install-zk.yaml.j2 rename to tests/templates/kuttl/orphaned-resources/01-install-zk.yaml.j2 index 9958d9fb..a6a17bc5 100644 --- a/tests/templates/kuttl/orphaned-resources/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: 
{{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/02-assert.yaml b/tests/templates/kuttl/orphaned-resources/02-assert.yaml index a3331d5c..f486d011 100644 --- a/tests/templates/kuttl/orphaned-resources/02-assert.yaml +++ b/tests/templates/kuttl/orphaned-resources/02-assert.yaml @@ -1,36 +1,20 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 600 +timeout: 300 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: derby-druid-historical-default -status: - readyReplicas: 1 - replicas: 1 + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-default + name: druid-hdfs-journalnode-default status: readyReplicas: 1 replicas: 1 @@ -38,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/01-install-hdfs.yaml.j2 b/tests/templates/kuttl/orphaned-resources/02-install-hdfs.yaml.j2 similarity index 58% rename from tests/templates/kuttl/orphaned-resources/01-install-hdfs.yaml.j2 rename to tests/templates/kuttl/orphaned-resources/02-install-hdfs.yaml.j2 index 6f512cc9..851577fe 100644 --- a/tests/templates/kuttl/orphaned-resources/01-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/02-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if 
lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: hdfs-znode dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/03-assert.yaml b/tests/templates/kuttl/orphaned-resources/03-assert.yaml index 9dc71495..a3331d5c 100644 --- a/tests/templates/kuttl/orphaned-resources/03-assert.yaml +++ b/tests/templates/kuttl/orphaned-resources/03-assert.yaml @@ -1,12 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 180 +timeout: 600 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-broker-newrg + name: derby-druid-broker-default status: readyReplicas: 1 replicas: 1 @@ -14,7 +14,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-coordinator-newrg + name: derby-druid-coordinator-default status: readyReplicas: 1 replicas: 1 @@ -22,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-historical-newrg + name: derby-druid-historical-default status: readyReplicas: 1 replicas: 1 @@ -30,7 +30,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-middlemanager-newrg + name: derby-druid-middlemanager-default status: readyReplicas: 1 replicas: 1 @@ -38,7 +38,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: derby-druid-router-newrg + name: derby-druid-router-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/02-install-druid.yaml.j2 
b/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 similarity index 60% rename from tests/templates/kuttl/orphaned-resources/02-install-druid.yaml.j2 rename to tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 index 1de84747..1c0964ae 100644 --- a/tests/templates/kuttl/orphaned-resources/02-install-druid.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/03-install-druid.yaml.j2 @@ -23,24 +23,42 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/04-assert.yaml b/tests/templates/kuttl/orphaned-resources/04-assert.yaml new file mode 100644 index 00000000..9dc71495 --- /dev/null +++ b/tests/templates/kuttl/orphaned-resources/04-assert.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 180 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-broker-newrg +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-coordinator-newrg +status: + readyReplicas: 1 + 
replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-historical-newrg +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-middlemanager-newrg +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: derby-druid-router-newrg +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/orphaned-resources/03-errors.yaml b/tests/templates/kuttl/orphaned-resources/04-errors.yaml similarity index 100% rename from tests/templates/kuttl/orphaned-resources/03-errors.yaml rename to tests/templates/kuttl/orphaned-resources/04-errors.yaml diff --git a/tests/templates/kuttl/orphaned-resources/03-update-druid.yaml.j2 b/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 similarity index 62% rename from tests/templates/kuttl/orphaned-resources/03-update-druid.yaml.j2 rename to tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 index 7fdfd88e..708ed3f1 100644 --- a/tests/templates/kuttl/orphaned-resources/03-update-druid.yaml.j2 +++ b/tests/templates/kuttl/orphaned-resources/04-update-druid.yaml.j2 @@ -23,28 +23,46 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: null newrg: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: null newrg: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: null newrg: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ 
lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: null newrg: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: null newrg: From 16257c345d396d5d6ed4d83424f755dc275d372c Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 16:08:50 +0100 Subject: [PATCH 15/27] Enable log aggregation in resources test --- .../kuttl/resources/00-assert.yaml.j2 | 10 ++++++++++ ...ctor-aggregator-discovery-configmap.yaml.j2 | 9 +++++++++ .../{00-assert.yaml => 10-assert.yaml} | 0 ...nstall-zk.yaml.j2 => 10-install-zk.yaml.j2} | 8 ++++++++ ...-minio.yaml.j2 => 20-install-minio.yaml.j2} | 0 .../{20-assert.yaml => 30-assert.yaml.j2} | 18 ++++++++++++++++++ ...-druid.yaml.j2 => 30-install-druid.yaml.j2} | 17 +++++++++++++++++ .../{30-assert.yaml => 40-assert.yaml} | 0 8 files changed, 62 insertions(+) create mode 100644 tests/templates/kuttl/resources/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/resources/{00-assert.yaml => 10-assert.yaml} (100%) rename tests/templates/kuttl/resources/{00-install-zk.yaml.j2 => 10-install-zk.yaml.j2} (66%) rename tests/templates/kuttl/resources/{10-install-minio.yaml.j2 => 20-install-minio.yaml.j2} (100%) rename tests/templates/kuttl/resources/{20-assert.yaml => 30-assert.yaml.j2} (86%) rename tests/templates/kuttl/resources/{20-install-druid.yaml.j2 => 30-install-druid.yaml.j2} (72%) rename tests/templates/kuttl/resources/{30-assert.yaml => 40-assert.yaml} (100%) diff --git a/tests/templates/kuttl/resources/00-assert.yaml.j2 b/tests/templates/kuttl/resources/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/resources/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 
+kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/resources/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/resources/00-assert.yaml b/tests/templates/kuttl/resources/10-assert.yaml similarity index 100% rename from tests/templates/kuttl/resources/00-assert.yaml rename to tests/templates/kuttl/resources/10-assert.yaml diff --git a/tests/templates/kuttl/resources/00-install-zk.yaml.j2 b/tests/templates/kuttl/resources/10-install-zk.yaml.j2 similarity index 66% rename from tests/templates/kuttl/resources/00-install-zk.yaml.j2 rename to tests/templates/kuttl/resources/10-install-zk.yaml.j2 index e3f705b9..9b6e4d5b 100644 --- a/tests/templates/kuttl/resources/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/resources/10-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/resources/10-install-minio.yaml.j2 b/tests/templates/kuttl/resources/20-install-minio.yaml.j2 similarity index 100% rename from 
tests/templates/kuttl/resources/10-install-minio.yaml.j2 rename to tests/templates/kuttl/resources/20-install-minio.yaml.j2 diff --git a/tests/templates/kuttl/resources/20-assert.yaml b/tests/templates/kuttl/resources/30-assert.yaml.j2 similarity index 86% rename from tests/templates/kuttl/resources/20-assert.yaml rename to tests/templates/kuttl/resources/30-assert.yaml.j2 index fdef666e..aef92fe6 100644 --- a/tests/templates/kuttl/resources/20-assert.yaml +++ b/tests/templates/kuttl/resources/30-assert.yaml.j2 @@ -19,6 +19,9 @@ spec: limits: cpu: "4" memory: 2Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} status: readyReplicas: 1 replicas: 1 @@ -39,6 +42,9 @@ spec: limits: cpu: "4" memory: 2Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} status: readyReplicas: 1 replicas: 1 @@ -59,6 +65,9 @@ spec: limits: cpu: "4" memory: 4Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} volumes: - name: tls-mount ephemeral: @@ -102,6 +111,9 @@ spec: limits: cpu: "1" memory: 1Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} status: readyReplicas: 1 replicas: 1 @@ -122,6 +134,9 @@ spec: limits: cpu: "3" memory: 3Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} --- apiVersion: apps/v1 kind: StatefulSet @@ -139,6 +154,9 @@ spec: limits: cpu: "4" memory: 2Gi +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + - name: vector +{% endif %} status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/resources/20-install-druid.yaml.j2 b/tests/templates/kuttl/resources/30-install-druid.yaml.j2 similarity index 72% rename from tests/templates/kuttl/resources/20-install-druid.yaml.j2 rename to tests/templates/kuttl/resources/30-install-druid.yaml.j2 index dede7824..6e7ec232 100644 --- a/tests/templates/kuttl/resources/20-install-druid.yaml.j2 +++ b/tests/templates/kuttl/resources/30-install-druid.yaml.j2 @@ -27,16 +27,28 @@ spec: 
connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 @@ -52,6 +64,8 @@ spec: middleManagers: config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} resources: cpu: min: 100m @@ -71,6 +85,9 @@ spec: limit: 3Gi replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/resources/30-assert.yaml b/tests/templates/kuttl/resources/40-assert.yaml similarity index 100% rename from tests/templates/kuttl/resources/30-assert.yaml rename to tests/templates/kuttl/resources/40-assert.yaml From eb052ef6d2c1e02548785c7645046ed8a3e0e5a4 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 16:16:51 +0100 Subject: [PATCH 16/27] Enable log aggregation in s3-deep-storage test --- .../kuttl/s3-deep-storage/00-assert.yaml.j2 | 10 ++++++++++ ...ctor-aggregator-discovery-configmap.yaml.j2 | 9 +++++++++ .../{00-assert.yaml => 01-assert.yaml} | 0 ...nstall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 ++++++++ ...2 => 02-install-minio-certificates.yaml.j2} | 0 ...-minio.yaml.j2 => 03-install-minio.yaml.j2} | 0 .../{03-assert.yaml => 04-assert.yaml} | 0 ...container.yaml => 04-checks-container.yaml} | 0 .../s3-deep-storage/10-install-druid.yaml.j2 | 18 ++++++++++++++++++ 9 files changed, 45 insertions(+) 
create mode 100644 tests/templates/kuttl/s3-deep-storage/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/s3-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/s3-deep-storage/{00-assert.yaml => 01-assert.yaml} (100%) rename tests/templates/kuttl/s3-deep-storage/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (67%) rename tests/templates/kuttl/s3-deep-storage/{01-install-minio-certificates.yaml.j2 => 02-install-minio-certificates.yaml.j2} (100%) rename tests/templates/kuttl/s3-deep-storage/{02-install-minio.yaml.j2 => 03-install-minio.yaml.j2} (100%) rename tests/templates/kuttl/s3-deep-storage/{03-assert.yaml => 04-assert.yaml} (100%) rename tests/templates/kuttl/s3-deep-storage/{03-checks-container.yaml => 04-checks-container.yaml} (100%) diff --git a/tests/templates/kuttl/s3-deep-storage/00-assert.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/s3-deep-storage/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/s3-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/s3-deep-storage/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/s3-deep-storage/00-assert.yaml b/tests/templates/kuttl/s3-deep-storage/01-assert.yaml similarity index 100% 
rename from tests/templates/kuttl/s3-deep-storage/00-assert.yaml rename to tests/templates/kuttl/s3-deep-storage/01-assert.yaml diff --git a/tests/templates/kuttl/s3-deep-storage/00-install-zk.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/01-install-zk.yaml.j2 similarity index 67% rename from tests/templates/kuttl/s3-deep-storage/00-install-zk.yaml.j2 rename to tests/templates/kuttl/s3-deep-storage/01-install-zk.yaml.j2 index 80aa215e..42c7dd18 100644 --- a/tests/templates/kuttl/s3-deep-storage/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/s3-deep-storage/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/s3-deep-storage/01-install-minio-certificates.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/02-install-minio-certificates.yaml.j2 similarity index 100% rename from tests/templates/kuttl/s3-deep-storage/01-install-minio-certificates.yaml.j2 rename to tests/templates/kuttl/s3-deep-storage/02-install-minio-certificates.yaml.j2 diff --git a/tests/templates/kuttl/s3-deep-storage/02-install-minio.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/03-install-minio.yaml.j2 similarity index 100% rename from tests/templates/kuttl/s3-deep-storage/02-install-minio.yaml.j2 rename to tests/templates/kuttl/s3-deep-storage/03-install-minio.yaml.j2 diff --git a/tests/templates/kuttl/s3-deep-storage/03-assert.yaml b/tests/templates/kuttl/s3-deep-storage/04-assert.yaml similarity index 100% rename from tests/templates/kuttl/s3-deep-storage/03-assert.yaml rename to 
tests/templates/kuttl/s3-deep-storage/04-assert.yaml diff --git a/tests/templates/kuttl/s3-deep-storage/03-checks-container.yaml b/tests/templates/kuttl/s3-deep-storage/04-checks-container.yaml similarity index 100% rename from tests/templates/kuttl/s3-deep-storage/03-checks-container.yaml rename to tests/templates/kuttl/s3-deep-storage/04-checks-container.yaml diff --git a/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 b/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 index a06c63d4..ebf9c0b5 100644 --- a/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 +++ b/tests/templates/kuttl/s3-deep-storage/10-install-druid.yaml.j2 @@ -59,24 +59,42 @@ spec: connString: jdbc:derby://localhost:1527/var/druid/metadata.db;create=true host: localhost port: 1527 +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 From 012f54acf3b1c97be802a4064cac568225c8f6b2 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 16:24:07 +0100 Subject: [PATCH 17/27] Enable log aggregation in smoke test --- tests/templates/kuttl/smoke/00-assert.yaml | 12 --- tests/templates/kuttl/smoke/00-assert.yaml.j2 | 10 +++ 
...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++ tests/templates/kuttl/smoke/01-assert.yaml | 9 +-- ...postgres.yaml => 01-install-postgres.yaml} | 0 tests/templates/kuttl/smoke/02-assert.yaml | 19 +---- ...stall-zk.yaml.j2 => 02-install-zk.yaml.j2} | 8 ++ tests/templates/kuttl/smoke/03-assert.yaml | 57 ++------------- ...l-hdfs.yaml.j2 => 03-install-hdfs.yaml.j2} | 12 +++ tests/templates/kuttl/smoke/04-assert.yaml | 73 +++++++++++++++++++ ...druid.yaml.j2 => 04-install-druid.yaml.j2} | 18 +++++ 11 files changed, 142 insertions(+), 85 deletions(-) delete mode 100644 tests/templates/kuttl/smoke/00-assert.yaml create mode 100644 tests/templates/kuttl/smoke/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/smoke/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/smoke/{00-install-postgres.yaml => 01-install-postgres.yaml} (100%) rename tests/templates/kuttl/smoke/{01-install-zk.yaml.j2 => 02-install-zk.yaml.j2} (70%) rename tests/templates/kuttl/smoke/{02-install-hdfs.yaml.j2 => 03-install-hdfs.yaml.j2} (58%) create mode 100644 tests/templates/kuttl/smoke/04-assert.yaml rename tests/templates/kuttl/smoke/{03-install-druid.yaml.j2 => 04-install-druid.yaml.j2} (61%) diff --git a/tests/templates/kuttl/smoke/00-assert.yaml b/tests/templates/kuttl/smoke/00-assert.yaml deleted file mode 100644 index 7702af69..00000000 --- a/tests/templates/kuttl/smoke/00-assert.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -timeout: 480 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-postgresql -status: - readyReplicas: 1 - replicas: 1 diff --git a/tests/templates/kuttl/smoke/00-assert.yaml.j2 b/tests/templates/kuttl/smoke/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/smoke/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/smoke/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/smoke/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/smoke/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/smoke/01-assert.yaml b/tests/templates/kuttl/smoke/01-assert.yaml index 4998bcdd..7702af69 100644 --- a/tests/templates/kuttl/smoke/01-assert.yaml +++ b/tests/templates/kuttl/smoke/01-assert.yaml @@ -1,17 +1,12 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 300 +timeout: 480 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-zk-server-default + name: druid-postgresql status: readyReplicas: 1 replicas: 1 ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: hdfs-znode diff --git a/tests/templates/kuttl/smoke/00-install-postgres.yaml b/tests/templates/kuttl/smoke/01-install-postgres.yaml similarity index 100% rename from tests/templates/kuttl/smoke/00-install-postgres.yaml rename to tests/templates/kuttl/smoke/01-install-postgres.yaml diff --git a/tests/templates/kuttl/smoke/02-assert.yaml b/tests/templates/kuttl/smoke/02-assert.yaml index f486d011..4998bcdd 100644 --- a/tests/templates/kuttl/smoke/02-assert.yaml +++ b/tests/templates/kuttl/smoke/02-assert.yaml @@ -6,23 +6,12 @@ timeout: 300 apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-hdfs-namenode-default -status: - readyReplicas: 2 - replicas: 2 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-hdfs-journalnode-default + name: druid-zk-server-default status: readyReplicas: 1 
replicas: 1 --- -apiVersion: apps/v1 -kind: StatefulSet +apiVersion: v1 +kind: ConfigMap metadata: - name: druid-hdfs-datanode-default -status: - readyReplicas: 1 - replicas: 1 + name: hdfs-znode diff --git a/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 b/tests/templates/kuttl/smoke/02-install-zk.yaml.j2 similarity index 70% rename from tests/templates/kuttl/smoke/01-install-zk.yaml.j2 rename to tests/templates/kuttl/smoke/02-install-zk.yaml.j2 index a4d5f401..578fcf55 100644 --- a/tests/templates/kuttl/smoke/01-install-zk.yaml.j2 +++ b/tests/templates/kuttl/smoke/02-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/smoke/03-assert.yaml b/tests/templates/kuttl/smoke/03-assert.yaml index 91a06143..f486d011 100644 --- a/tests/templates/kuttl/smoke/03-assert.yaml +++ b/tests/templates/kuttl/smoke/03-assert.yaml @@ -1,65 +1,20 @@ --- apiVersion: kuttl.dev/v1beta1 kind: TestAssert -timeout: 600 +timeout: 300 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-broker-default + name: druid-hdfs-namenode-default status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-coordinator-default -status: - readyReplicas: 1 - replicas: 1 ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: druid-historical-default -status: - readyReplicas: 1 - replicas: 1 -spec: - template: - spec: - volumes: - - name: tls-mount - ephemeral: - volumeClaimTemplate: - metadata: - annotations: - secrets.stackable.tech/class: tls - 
secrets.stackable.tech/scope: pod,node - creationTimestamp: null - spec: - storageClassName: secrets.stackable.tech - - name: tls - emptyDir: {} - - name: config - configMap: - name: druid-historical-default - - name: rwconfig - emptyDir: {} - - name: log-config - - name: log - - name: hdfs - configMap: - name: druid-hdfs - - name: segment-cache - emptyDir: - sizeLimit: 1G + readyReplicas: 2 + replicas: 2 --- apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-middlemanager-default + name: druid-hdfs-journalnode-default status: readyReplicas: 1 replicas: 1 @@ -67,7 +22,7 @@ status: apiVersion: apps/v1 kind: StatefulSet metadata: - name: druid-router-default + name: druid-hdfs-datanode-default status: readyReplicas: 1 replicas: 1 diff --git a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/smoke/03-install-hdfs.yaml.j2 similarity index 58% rename from tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 rename to tests/templates/kuttl/smoke/03-install-hdfs.yaml.j2 index 6f512cc9..851577fe 100644 --- a/tests/templates/kuttl/smoke/02-install-hdfs.yaml.j2 +++ b/tests/templates/kuttl/smoke/03-install-hdfs.yaml.j2 @@ -13,17 +13,29 @@ spec: image: productVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['hadoop'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: hdfs-znode dfsReplication: 1 nameNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 2 dataNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 journalNodes: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git 
a/tests/templates/kuttl/smoke/04-assert.yaml b/tests/templates/kuttl/smoke/04-assert.yaml new file mode 100644 index 00000000..91a06143 --- /dev/null +++ b/tests/templates/kuttl/smoke/04-assert.yaml @@ -0,0 +1,73 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-broker-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-coordinator-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-historical-default +status: + readyReplicas: 1 + replicas: 1 +spec: + template: + spec: + volumes: + - name: tls-mount + ephemeral: + volumeClaimTemplate: + metadata: + annotations: + secrets.stackable.tech/class: tls + secrets.stackable.tech/scope: pod,node + creationTimestamp: null + spec: + storageClassName: secrets.stackable.tech + - name: tls + emptyDir: {} + - name: config + configMap: + name: druid-historical-default + - name: rwconfig + emptyDir: {} + - name: log-config + - name: log + - name: hdfs + configMap: + name: druid-hdfs + - name: segment-cache + emptyDir: + sizeLimit: 1G +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-middlemanager-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: druid-router-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/smoke/03-install-druid.yaml.j2 b/tests/templates/kuttl/smoke/04-install-druid.yaml.j2 similarity index 61% rename from tests/templates/kuttl/smoke/03-install-druid.yaml.j2 rename to tests/templates/kuttl/smoke/04-install-druid.yaml.j2 index abf9c8d9..5fd5e800 100644 --- a/tests/templates/kuttl/smoke/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/smoke/04-install-druid.yaml.j2 @@ -25,24 +25,42 @@ spec: port: 5432 user: druid password: druid +{% if lookup('env', 'VECTOR_AGGREGATOR') %} 
+ vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 From b8183b48c582ebb6d414aeece4195f13e985e27f Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 16:34:14 +0100 Subject: [PATCH 18/27] Enable log aggregation in tls test --- tests/templates/kuttl/tls/00-assert.yaml.j2 | 10 ++++++++++ ...ctor-aggregator-discovery-configmap.yaml.j2 | 9 +++++++++ .../tls/{00-assert.yaml => 01-assert.yaml} | 0 ...nstall-zk.yaml.j2 => 01-install-zk.yaml.j2} | 8 ++++++++ ...2 => 02-install-minio-certificates.yaml.j2} | 0 ...-minio.yaml.j2 => 03-install-minio.yaml.j2} | 0 .../{03-assert.yaml.j2 => 04-assert.yaml.j2} | 0 ...-druid.yaml.j2 => 04-install-druid.yaml.j2} | 18 ++++++++++++++++++ .../tls/{04-assert.yaml => 05-assert.yaml} | 0 ...hecks.yaml.j2 => 05-install-checks.yaml.j2} | 0 10 files changed, 45 insertions(+) create mode 100644 tests/templates/kuttl/tls/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/tls/00-install-vector-aggregator-discovery-configmap.yaml.j2 rename tests/templates/kuttl/tls/{00-assert.yaml => 01-assert.yaml} (100%) rename tests/templates/kuttl/tls/{00-install-zk.yaml.j2 => 01-install-zk.yaml.j2} (66%) rename tests/templates/kuttl/tls/{01-install-minio-certificates.yaml.j2 => 
02-install-minio-certificates.yaml.j2} (100%) rename tests/templates/kuttl/tls/{02-install-minio.yaml.j2 => 03-install-minio.yaml.j2} (100%) rename tests/templates/kuttl/tls/{03-assert.yaml.j2 => 04-assert.yaml.j2} (100%) rename tests/templates/kuttl/tls/{03-install-druid.yaml.j2 => 04-install-druid.yaml.j2} (80%) rename tests/templates/kuttl/tls/{04-assert.yaml => 05-assert.yaml} (100%) rename tests/templates/kuttl/tls/{04-install-checks.yaml.j2 => 05-install-checks.yaml.j2} (100%) diff --git a/tests/templates/kuttl/tls/00-assert.yaml.j2 b/tests/templates/kuttl/tls/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/tls/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/tls/00-install-vector-aggregator-discovery-configmap.yaml.j2 b/tests/templates/kuttl/tls/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/tls/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/tls/00-assert.yaml b/tests/templates/kuttl/tls/01-assert.yaml similarity index 100% rename from tests/templates/kuttl/tls/00-assert.yaml rename to tests/templates/kuttl/tls/01-assert.yaml diff --git a/tests/templates/kuttl/tls/00-install-zk.yaml.j2 b/tests/templates/kuttl/tls/01-install-zk.yaml.j2 similarity index 66% rename from tests/templates/kuttl/tls/00-install-zk.yaml.j2 rename to tests/templates/kuttl/tls/01-install-zk.yaml.j2 index e3f705b9..9b6e4d5b 100644 --- 
a/tests/templates/kuttl/tls/00-install-zk.yaml.j2 +++ b/tests/templates/kuttl/tls/01-install-zk.yaml.j2 @@ -7,7 +7,15 @@ spec: image: productVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[0] }}" stackableVersion: "{{ test_scenario['values']['zookeeper-latest'].split('-stackable')[1] }}" +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + logging: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} servers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/tls/01-install-minio-certificates.yaml.j2 b/tests/templates/kuttl/tls/02-install-minio-certificates.yaml.j2 similarity index 100% rename from tests/templates/kuttl/tls/01-install-minio-certificates.yaml.j2 rename to tests/templates/kuttl/tls/02-install-minio-certificates.yaml.j2 diff --git a/tests/templates/kuttl/tls/02-install-minio.yaml.j2 b/tests/templates/kuttl/tls/03-install-minio.yaml.j2 similarity index 100% rename from tests/templates/kuttl/tls/02-install-minio.yaml.j2 rename to tests/templates/kuttl/tls/03-install-minio.yaml.j2 diff --git a/tests/templates/kuttl/tls/03-assert.yaml.j2 b/tests/templates/kuttl/tls/04-assert.yaml.j2 similarity index 100% rename from tests/templates/kuttl/tls/03-assert.yaml.j2 rename to tests/templates/kuttl/tls/04-assert.yaml.j2 diff --git a/tests/templates/kuttl/tls/03-install-druid.yaml.j2 b/tests/templates/kuttl/tls/04-install-druid.yaml.j2 similarity index 80% rename from tests/templates/kuttl/tls/03-install-druid.yaml.j2 rename to tests/templates/kuttl/tls/04-install-druid.yaml.j2 index 90ec1491..c8e3bffe 100644 --- a/tests/templates/kuttl/tls/03-install-druid.yaml.j2 +++ b/tests/templates/kuttl/tls/04-install-druid.yaml.j2 @@ -88,25 +88,43 @@ spec: serverAndInternalSecretClass: druid-tls {% else %} tls: null +{% endif %} +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + 
vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} zookeeperConfigMapName: druid-znode brokers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 coordinators: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 historicals: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 middleManagers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 routers: + config: + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/tls/04-assert.yaml b/tests/templates/kuttl/tls/05-assert.yaml similarity index 100% rename from tests/templates/kuttl/tls/04-assert.yaml rename to tests/templates/kuttl/tls/05-assert.yaml diff --git a/tests/templates/kuttl/tls/04-install-checks.yaml.j2 b/tests/templates/kuttl/tls/05-install-checks.yaml.j2 similarity index 100% rename from tests/templates/kuttl/tls/04-install-checks.yaml.j2 rename to tests/templates/kuttl/tls/05-install-checks.yaml.j2 From 938903615aac01910263b73f2cb32a9002a20dad Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 16:56:44 +0100 Subject: [PATCH 19/27] Fix custom configuration in logging test --- .../kuttl/logging/04-install-druid.yaml.j2 | 142 ++++-------------- 1 file changed, 27 insertions(+), 115 deletions(-) diff --git a/tests/templates/kuttl/logging/04-install-druid.yaml.j2 b/tests/templates/kuttl/logging/04-install-druid.yaml.j2 index d7aa6fd3..d7184d27 100644 --- a/tests/templates/kuttl/logging/04-install-druid.yaml.j2 +++ b/tests/templates/kuttl/logging/04-install-druid.yaml.j2 @@ -5,6 +5,23 @@ metadata: name: install-druid timeout: 600 --- +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: druid-log-config +data: + log4j2.properties: |- + appenders = FILE + + appender.FILE.type = File + appender.FILE.name = FILE + appender.FILE.fileName = /stackable/log/druid/druid.log4j2.xml + appender.FILE.layout.type = XMLLayout + + rootLogger.level=INFO + rootLogger.appenderRefs = FILE + rootLogger.appenderRef.FILE.ref = FILE +--- apiVersion: druid.stackable.tech/v1alpha1 kind: DruidCluster metadata: @@ -64,29 +81,8 @@ spec: enableVectorAgent: true containers: druid: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - prepare: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO + custom: + configMap: druid-log-config coordinators: roleGroups: automatic-log-config: @@ -126,29 +122,8 @@ spec: enableVectorAgent: true containers: druid: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - prepare: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO + custom: + configMap: druid-log-config historicals: roleGroups: automatic-log-config: @@ -188,29 +163,8 @@ spec: enableVectorAgent: true containers: druid: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - prepare: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO + custom: + configMap: druid-log-config middleManagers: roleGroups: automatic-log-config: @@ -250,29 +204,8 @@ spec: enableVectorAgent: true containers: druid: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - prepare: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - 
level: INFO + custom: + configMap: druid-log-config routers: roleGroups: automatic-log-config: @@ -312,26 +245,5 @@ spec: enableVectorAgent: true containers: druid: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - prepare: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO - vector: - console: - level: INFO - file: - level: INFO - loggers: - ROOT: - level: INFO + custom: + configMap: druid-log-config From f95bb6bb0103072bc62bcf651fdb70a83fbeec2e Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 17:00:37 +0100 Subject: [PATCH 20/27] Regenerate charts --- deploy/helm/druid-operator/crds/crds.yaml | 764 ++++++++++++++++++++++ 1 file changed, 764 insertions(+) diff --git a/deploy/helm/druid-operator/crds/crds.yaml b/deploy/helm/druid-operator/crds/crds.yaml index dcbf8dfc..901b0b52 100644 --- a/deploy/helm/druid-operator/crds/crds.yaml +++ b/deploy/helm/druid-operator/crds/crds.yaml @@ -35,6 +35,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: 
string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -95,6 +171,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -517,6 +669,10 @@ spec: nullable: true type: string type: object + vectorAggregatorConfigMapName: + description: Name of the Vector aggregator discovery ConfigMap. It must contain the key `ADDRESS` with the address of the Vector aggregator. 
+ nullable: true + type: string zookeeperConfigMapName: description: ZooKeeper discovery ConfigMap type: string @@ -536,6 +692,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -596,6 +828,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + 
level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -697,6 +1005,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -784,6 +1168,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` 
+ properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -955,6 +1415,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + 
type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -1015,6 +1551,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -1116,6 +1728,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + 
properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: @@ -1176,6 +1864,82 @@ spec: config: default: {} properties: + logging: + default: + enableVectorAgent: null + containers: {} + properties: + containers: + additionalProperties: + anyOf: + - required: + - custom + - {} + description: Fragment derived from `ContainerLogConfigChoice` + properties: + console: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + custom: + description: Custom log configuration provided in a ConfigMap + properties: + configMap: + nullable: true + type: string + type: object + file: + nullable: true + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + loggers: + additionalProperties: + properties: + level: + description: Log levels + enum: + - TRACE + - DEBUG + - INFO + - WARN + - ERROR + - FATAL + - NONE + nullable: true + type: string + type: object + default: {} + type: object + type: object + type: object + enableVectorAgent: + nullable: true + type: boolean + type: object resources: default: memory: From 094291564cee4d3537e3eee7e7ea18a0b4409202 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 17:12:01 +0100 Subject: [PATCH 21/27] Upgrade stackableVersions of images --- docs/modules/druid/examples/getting_started/druid.yaml | 2 +- 
docs/modules/druid/examples/getting_started/hdfs.yaml | 2 +- docs/modules/druid/examples/getting_started/zookeeper.yaml | 2 +- docs/modules/druid/pages/usage.adoc | 2 +- examples/psql-s3/psql-s3-druid-cluster.yaml | 4 ++-- examples/psql/psql-hdfs-druid-cluster.yaml | 6 +++--- examples/tls/tls-druid-cluster.yaml | 6 +++--- rust/crd/test/resources/resource_merge/druid_cluster.yaml | 2 +- rust/crd/test/resources/resource_merge/segment_cache.yaml | 2 +- rust/crd/test/resources/role_service/druid_cluster.yaml | 2 +- .../test/resources/druid_controller/segment_cache.yaml | 2 +- 11 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/modules/druid/examples/getting_started/druid.yaml b/docs/modules/druid/examples/getting_started/druid.yaml index 41713e50..053e66e8 100644 --- a/docs/modules/druid/examples/getting_started/druid.yaml +++ b/docs/modules/druid/examples/getting_started/druid.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/docs/modules/druid/examples/getting_started/hdfs.yaml b/docs/modules/druid/examples/getting_started/hdfs.yaml index 1fe31205..f91ccd63 100644 --- a/docs/modules/druid/examples/getting_started/hdfs.yaml +++ b/docs/modules/druid/examples/getting_started/hdfs.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 0.2.0 + stackableVersion: 23.4.0-rc1 zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 3 nameNodes: diff --git a/docs/modules/druid/examples/getting_started/zookeeper.yaml b/docs/modules/druid/examples/getting_started/zookeeper.yaml index 913f179f..8f6d2f5b 100644 --- a/docs/modules/druid/examples/getting_started/zookeeper.yaml +++ b/docs/modules/druid/examples/getting_started/zookeeper.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.8.0 - stackableVersion: 0.8.0 + stackableVersion: 23.4.0-rc2 servers: roleGroups: default: diff --git 
a/docs/modules/druid/pages/usage.adoc b/docs/modules/druid/pages/usage.adoc index 0d1f0c26..77e79d48 100644 --- a/docs/modules/druid/pages/usage.adoc +++ b/docs/modules/druid/pages/usage.adoc @@ -54,7 +54,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/examples/psql-s3/psql-s3-druid-cluster.yaml b/examples/psql-s3/psql-s3-druid-cluster.yaml index 1c3d920d..b9acfef2 100644 --- a/examples/psql-s3/psql-s3-druid-cluster.yaml +++ b/examples/psql-s3/psql-s3-druid-cluster.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.8.0 - stackableVersion: 0.8.0 + stackableVersion: 23.4.0-rc2 servers: roleGroups: default: @@ -47,7 +47,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: s3: diff --git a/examples/psql/psql-hdfs-druid-cluster.yaml b/examples/psql/psql-hdfs-druid-cluster.yaml index 4e954cdf..ece82590 100644 --- a/examples/psql/psql-hdfs-druid-cluster.yaml +++ b/examples/psql/psql-hdfs-druid-cluster.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.8.0 - stackableVersion: 0.8.0 + stackableVersion: 23.4.0-rc2 servers: roleGroups: default: @@ -35,7 +35,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 0.2.0 + stackableVersion: 23.4.0-rc1 zookeeperConfigMapName: psql-druid-znode dfsReplication: 1 nameNodes: @@ -58,7 +58,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/examples/tls/tls-druid-cluster.yaml b/examples/tls/tls-druid-cluster.yaml index e1f4c7d5..e0a05b39 100644 --- a/examples/tls/tls-druid-cluster.yaml +++ b/examples/tls/tls-druid-cluster.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.8.0 - stackableVersion: 0.8.0 + stackableVersion: 23.4.0-rc2 servers: roleGroups: default: @@ -27,7 +27,7 @@ metadata: spec: image: 
productVersion: 3.3.4 - stackableVersion: 0.2.0 + stackableVersion: 23.4.0-rc1 zookeeperConfigMapName: druid-hdfs-znode dfsReplication: 1 nameNodes: @@ -80,7 +80,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: authentication: - authenticationClass: druid-mtls-authentication-class diff --git a/rust/crd/test/resources/resource_merge/druid_cluster.yaml b/rust/crd/test/resources/resource_merge/druid_cluster.yaml index 401fb5e0..61ac4c3b 100644 --- a/rust/crd/test/resources/resource_merge/druid_cluster.yaml +++ b/rust/crd/test/resources/resource_merge/druid_cluster.yaml @@ -8,7 +8,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/rust/crd/test/resources/resource_merge/segment_cache.yaml b/rust/crd/test/resources/resource_merge/segment_cache.yaml index d85ece29..fe830526 100644 --- a/rust/crd/test/resources/resource_merge/segment_cache.yaml +++ b/rust/crd/test/resources/resource_merge/segment_cache.yaml @@ -8,7 +8,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/rust/crd/test/resources/role_service/druid_cluster.yaml b/rust/crd/test/resources/role_service/druid_cluster.yaml index c6f40787..44a61780 100644 --- a/rust/crd/test/resources/role_service/druid_cluster.yaml +++ b/rust/crd/test/resources/role_service/druid_cluster.yaml @@ -8,7 +8,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: hdfs: diff --git a/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml b/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml index fbe62bb5..c8ca8833 100644 --- a/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml +++ 
b/rust/operator-binary/test/resources/druid_controller/segment_cache.yaml @@ -8,7 +8,7 @@ metadata: spec: image: productVersion: 24.0.0 - stackableVersion: 0.3.0 + stackableVersion: 23.4.0-rc2 clusterConfig: deepStorage: s3: From 2694fe227d53661b3413504d8243a8946e1cbfb6 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 17:30:20 +0100 Subject: [PATCH 22/27] Add section about log aggregation to the documentation --- docs/modules/druid/pages/usage.adoc | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/modules/druid/pages/usage.adoc b/docs/modules/druid/pages/usage.adoc index 77e79d48..10fbd026 100644 --- a/docs/modules/druid/pages/usage.adoc +++ b/docs/modules/druid/pages/usage.adoc @@ -390,6 +390,41 @@ The operator creates a `ConfigMap` with the name of the cluster which contains c The managed Druid instances are automatically configured to export Prometheus metrics. See xref:operators:monitoring.adoc[] for more details. +== Log aggregation + +The logs can be forwarded to a Vector log aggregator by providing a discovery +ConfigMap for the aggregator and by enabling the log agent: + +[source,yaml] +---- +spec: + clusterConfig: + vectorAggregatorConfigMapName: vector-aggregator-discovery + brokers: + config: + logging: + enableVectorAgent: true + coordinators: + config: + logging: + enableVectorAgent: true + historicals: + config: + logging: + enableVectorAgent: true + middleManagers: + config: + logging: + enableVectorAgent: true + routers: + config: + logging: + enableVectorAgent: true +---- + +Further information on how to configure logging, can be found in +xref:home:concepts:logging.adoc[]. + == Configuration & Environment Overrides The cluster definition also supports overriding configuration properties and environment variables, either per role or per role group, where the more specific override (role group) has precedence over the less specific one (role). 
From 19dbb6759c8e4d1de2f826da0b087117851644c9 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Mon, 27 Feb 2023 17:35:16 +0100 Subject: [PATCH 23/27] Update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 36946e54..8e6bd3a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ All notable changes to this project will be documented in this file. ### Added - Add support for non-TLS LDAP authentication. ([#374]) +- Log aggregation added ([#407]). ### Changed @@ -22,6 +23,7 @@ All notable changes to this project will be documented in this file. [#380]: https://github.com/stackabletech/druid-operator/pull/380 [#387]: https://github.com/stackabletech/druid-operator/pull/387 [#404]: https://github.com/stackabletech/druid-operator/pull/404 +[#407]: https://github.com/stackabletech/druid-operator/pull/407 ## [23.1.0] - 2023-01-23 From f92e27e630046ead864e78f2ef091d60ea67c060 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Tue, 28 Feb 2023 10:18:20 +0100 Subject: [PATCH 24/27] Optimize code --- rust/crd/src/lib.rs | 2 +- rust/crd/src/resource.rs | 4 ++-- rust/operator-binary/src/druid_controller.rs | 4 +--- rust/operator-binary/src/product_logging.rs | 2 -- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 9927806a..d53cfe37 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -199,9 +199,9 @@ pub struct DruidClusterSpec { #[serde(rename_all = "kebab-case")] #[strum(serialize_all = "kebab-case")] pub enum Container { + Druid, Prepare, Vector, - Druid, } #[derive(Clone, Debug, Deserialize, JsonSchema, Serialize)] diff --git a/rust/crd/src/resource.rs b/rust/crd/src/resource.rs index b6602236..2b81a5e7 100644 --- a/rust/crd/src/resource.rs +++ b/rust/crd/src/resource.rs @@ -52,8 +52,8 @@ impl RoleResource { pub fn as_memory_limits(&self) -> MemoryLimits { match self { - Self::Druid(r) => r.clone().memory, - Self::Historical(r) => 
r.clone().memory, + Self::Druid(r) => r.memory.clone(), + Self::Historical(r) => r.memory.clone(), } } diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index abe13141..0785039f 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -303,9 +303,7 @@ pub async fn reconcile_druid(druid: Arc, ctx: Arc) -> Result< let druid_tls_security = DruidTlsSecurity::new_from_druid_cluster(&druid, resolved_authentication_classes); - // False positive, auto-deref breaks type inference - #[allow(clippy::explicit_auto_deref)] - let role_config = transform_all_roles_to_config(&*druid, druid.build_role_properties()); + let role_config = transform_all_roles_to_config(druid.as_ref(), druid.build_role_properties()); let validated_role_config = validate_all_roles_and_groups_config( &resolved_product_image.product_version, &role_config.context(ProductConfigTransformSnafu)?, diff --git a/rust/operator-binary/src/product_logging.rs b/rust/operator-binary/src/product_logging.rs index 14fd34b9..3c869c9a 100644 --- a/rust/operator-binary/src/product_logging.rs +++ b/rust/operator-binary/src/product_logging.rs @@ -29,8 +29,6 @@ pub enum Error { entry: &'static str, cm_name: String, }, - #[snafu(display("crd validation failure"))] - CrdValidationFailure { source: stackable_druid_crd::Error }, #[snafu(display("vectorAggregatorConfigMapName must be set"))] MissingVectorAggregatorAddress, } From 0fcaa74d04b43ee10b4f28253f720986c250e868 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Tue, 28 Feb 2023 10:42:35 +0100 Subject: [PATCH 25/27] Use constants for volume names --- rust/crd/src/resource.rs | 7 +++-- rust/crd/src/security.rs | 16 ++++++---- rust/operator-binary/src/druid_controller.rs | 33 ++++++++++++-------- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/rust/crd/src/resource.rs b/rust/crd/src/resource.rs index 2b81a5e7..2250e508 100644 --- 
a/rust/crd/src/resource.rs +++ b/rust/crd/src/resource.rs @@ -19,6 +19,9 @@ use stackable_operator::{ }; use strum::{EnumDiscriminants, IntoStaticStr}; +// volume names +const SEGMENT_CACHE_VOLUME_NAME: &str = "segment-cache"; + /// This Error cannot derive PartialEq because fragment::ValidationError doesn't derive it #[derive(Snafu, Debug, EnumDiscriminants)] #[strum_discriminants(derive(IntoStaticStr))] @@ -87,9 +90,9 @@ impl RoleResource { pub fn update_volumes_and_volume_mounts(&self, cb: &mut ContainerBuilder, pb: &mut PodBuilder) { if let Self::Historical(r) = self { - cb.add_volume_mount("segment-cache", PATH_SEGMENT_CACHE); + cb.add_volume_mount(SEGMENT_CACHE_VOLUME_NAME, PATH_SEGMENT_CACHE); pb.add_volume( - VolumeBuilder::new("segment-cache") + VolumeBuilder::new(SEGMENT_CACHE_VOLUME_NAME) .empty_dir(EmptyDirVolumeSource { medium: r.storage.segment_cache.empty_dir.medium.clone(), size_limit: Some(r.storage.segment_cache.empty_dir.capacity.clone()), diff --git a/rust/crd/src/security.rs b/rust/crd/src/security.rs index dc6ca4fb..19154fa9 100644 --- a/rust/crd/src/security.rs +++ b/rust/crd/src/security.rs @@ -81,6 +81,10 @@ impl DruidTlsSecurity { const STACKABLE_MOUNT_TLS_DIR: &str = "/stackable/mount_tls"; const STACKABLE_TLS_DIR: &str = "/stackable/tls"; + // volume names + const TLS_VOLUME_NAME: &str = "tls"; + const TLS_MOUNT_VOLUME_NAME: &str = "tls-mount"; + pub fn new( resolved_authentication_classes: ResolvedAuthenticationClasses, server_and_internal_secret_class: Option, @@ -178,7 +182,7 @@ impl DruidTlsSecurity { // uses the same SecretClass as the Druid server itself. 
if let Some(secret_class) = &self.server_and_internal_secret_class { pod.add_volume( - VolumeBuilder::new("tls-mount") + VolumeBuilder::new(Self::TLS_MOUNT_VOLUME_NAME) .ephemeral( SecretOperatorVolumeSourceBuilder::new(secret_class) .with_pod_scope() @@ -187,16 +191,16 @@ impl DruidTlsSecurity { ) .build(), ); - prepare.add_volume_mount("tls-mount", Self::STACKABLE_MOUNT_TLS_DIR); - druid.add_volume_mount("tls-mount", Self::STACKABLE_MOUNT_TLS_DIR); + prepare.add_volume_mount(Self::TLS_MOUNT_VOLUME_NAME, Self::STACKABLE_MOUNT_TLS_DIR); + druid.add_volume_mount(Self::TLS_MOUNT_VOLUME_NAME, Self::STACKABLE_MOUNT_TLS_DIR); pod.add_volume( - VolumeBuilder::new("tls") + VolumeBuilder::new(Self::TLS_VOLUME_NAME) .with_empty_dir(Option::<&str>::None, None) .build(), ); - prepare.add_volume_mount("tls", Self::STACKABLE_TLS_DIR); - druid.add_volume_mount("tls", Self::STACKABLE_TLS_DIR); + prepare.add_volume_mount(Self::TLS_VOLUME_NAME, Self::STACKABLE_TLS_DIR); + druid.add_volume_mount(Self::TLS_VOLUME_NAME, Self::STACKABLE_TLS_DIR); } Ok(()) } diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 0785039f..1de5ae9b 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -76,6 +76,13 @@ pub const CONTROLLER_NAME: &str = "druidcluster"; const DOCKER_IMAGE_BASE_NAME: &str = "druid"; +// volume names +const DRUID_CONFIG_VOLUME_NAME: &str = "config"; +const HDFS_CONFIG_VOLUME_NAME: &str = "hdfs"; +const LOG_CONFIG_VOLUME_NAME: &str = "log-config"; +const LOG_VOLUME_NAME: &str = "log"; +const RW_CONFIG_VOLUME_NAME: &str = "rwconfig"; + pub struct Ctx { pub client: stackable_operator::client::Client, pub product_config: ProductConfigManager, @@ -802,8 +809,8 @@ fn build_rolegroup_statefulset( if merged_rolegroup_config.logging.enable_vector_agent { pb.add_container(product_logging::framework::vector_container( resolved_product_image, - "config", - "log", + 
DRUID_CONFIG_VOLUME_NAME, + LOG_VOLUME_NAME, merged_rolegroup_config .logging .containers @@ -856,9 +863,9 @@ fn add_hdfs_cm_volume_and_volume_mounts( ) { // hdfs deep storage mount if let DeepStorageSpec::HDFS(hdfs) = deep_storage_spec { - cb_druid.add_volume_mount("hdfs", HDFS_CONFIG_DIRECTORY); + cb_druid.add_volume_mount(HDFS_CONFIG_VOLUME_NAME, HDFS_CONFIG_DIRECTORY); pb.add_volume( - VolumeBuilder::new("hdfs") + VolumeBuilder::new(HDFS_CONFIG_VOLUME_NAME) .with_config_map(&hdfs.config_map_name) .build(), ); @@ -903,15 +910,15 @@ fn add_config_volume_and_volume_mounts( cb_druid: &mut ContainerBuilder, pb: &mut PodBuilder, ) { - cb_druid.add_volume_mount("config", DRUID_CONFIG_DIRECTORY); + cb_druid.add_volume_mount(DRUID_CONFIG_VOLUME_NAME, DRUID_CONFIG_DIRECTORY); pb.add_volume( - VolumeBuilder::new("config") + VolumeBuilder::new(DRUID_CONFIG_VOLUME_NAME) .with_config_map(rolegroup_ref.object_name()) .build(), ); - cb_druid.add_volume_mount("rwconfig", RW_CONFIG_DIRECTORY); + cb_druid.add_volume_mount(RW_CONFIG_VOLUME_NAME, RW_CONFIG_DIRECTORY); pb.add_volume( - VolumeBuilder::new("rwconfig") + VolumeBuilder::new(RW_CONFIG_VOLUME_NAME) .with_empty_dir(Some(""), None) .build(), ); @@ -923,7 +930,7 @@ fn add_log_config_volume_and_volume_mounts( cb_druid: &mut ContainerBuilder, pb: &mut PodBuilder, ) { - cb_druid.add_volume_mount("log-config", LOG_CONFIG_DIRECTORY); + cb_druid.add_volume_mount(LOG_CONFIG_VOLUME_NAME, LOG_CONFIG_DIRECTORY); let config_map = if let Some(ContainerLogConfig { choice: @@ -941,7 +948,7 @@ fn add_log_config_volume_and_volume_mounts( }; pb.add_volume( - VolumeBuilder::new("log-config") + VolumeBuilder::new(LOG_CONFIG_VOLUME_NAME) .with_config_map(config_map) .build(), ); @@ -952,10 +959,10 @@ fn add_log_volume_and_volume_mounts( cb_prepare: &mut ContainerBuilder, pb: &mut PodBuilder, ) { - cb_druid.add_volume_mount("log", LOG_DIR); - cb_prepare.add_volume_mount("log", LOG_DIR); + cb_druid.add_volume_mount(LOG_VOLUME_NAME, LOG_DIR); + 
cb_prepare.add_volume_mount(LOG_VOLUME_NAME, LOG_DIR); pb.add_volume( - VolumeBuilder::new("log") + VolumeBuilder::new(LOG_VOLUME_NAME) .with_empty_dir( Some(""), Some(Quantity(format!("{LOG_VOLUME_SIZE_IN_MIB}Mi"))), From 3f11f5c2a89328753abbd726ed3dd1ff630b27a7 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Tue, 28 Feb 2023 15:14:54 +0100 Subject: [PATCH 26/27] Set stackableVersion of hadoop to 23.4.0-rc2 --- docs/modules/druid/examples/getting_started/hdfs.yaml | 2 +- examples/psql/psql-hdfs-druid-cluster.yaml | 2 +- examples/tls/tls-druid-cluster.yaml | 2 +- tests/test-definition.yaml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/modules/druid/examples/getting_started/hdfs.yaml b/docs/modules/druid/examples/getting_started/hdfs.yaml index f91ccd63..62a1a53e 100644 --- a/docs/modules/druid/examples/getting_started/hdfs.yaml +++ b/docs/modules/druid/examples/getting_started/hdfs.yaml @@ -6,7 +6,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 23.4.0-rc1 + stackableVersion: 23.4.0-rc2 zookeeperConfigMapName: simple-hdfs-znode dfsReplication: 3 nameNodes: diff --git a/examples/psql/psql-hdfs-druid-cluster.yaml b/examples/psql/psql-hdfs-druid-cluster.yaml index ece82590..5d3b648f 100644 --- a/examples/psql/psql-hdfs-druid-cluster.yaml +++ b/examples/psql/psql-hdfs-druid-cluster.yaml @@ -35,7 +35,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 23.4.0-rc1 + stackableVersion: 23.4.0-rc2 zookeeperConfigMapName: psql-druid-znode dfsReplication: 1 nameNodes: diff --git a/examples/tls/tls-druid-cluster.yaml b/examples/tls/tls-druid-cluster.yaml index e0a05b39..f1da07f5 100644 --- a/examples/tls/tls-druid-cluster.yaml +++ b/examples/tls/tls-druid-cluster.yaml @@ -27,7 +27,7 @@ metadata: spec: image: productVersion: 3.3.4 - stackableVersion: 23.4.0-rc1 + stackableVersion: 23.4.0-rc2 zookeeperConfigMapName: druid-hdfs-znode dfsReplication: 1 nameNodes: diff --git a/tests/test-definition.yaml 
b/tests/test-definition.yaml index fd3df2fc..24ccc073 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -19,10 +19,10 @@ dimensions: - 0.45.0-stackable23.4.0-rc2 - name: hadoop values: - - 3.3.4-stackable23.4.0-rc1 + - 3.3.4-stackable23.4.0-rc2 - name: hadoop-latest values: - - 3.3.4-stackable23.4.0-rc1 + - 3.3.4-stackable23.4.0-rc2 - name: s3-use-tls values: - "true" From d2059267535d018915dedc5c264a2b3d578ea9d4 Mon Sep 17 00:00:00 2001 From: Siegfried Weber Date: Wed, 1 Mar 2023 09:30:19 +0100 Subject: [PATCH 27/27] Use variables for container names --- rust/operator-binary/src/druid_controller.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/rust/operator-binary/src/druid_controller.rs b/rust/operator-binary/src/druid_controller.rs index 1de5ae9b..c22cc4b9 100644 --- a/rust/operator-binary/src/druid_controller.rs +++ b/rust/operator-binary/src/druid_controller.rs @@ -682,15 +682,17 @@ fn build_rolegroup_statefulset( })?; // init container builder - let mut cb_prepare = ContainerBuilder::new(&Container::Prepare.to_string()).context( + let prepare_container_name = Container::Prepare.to_string(); + let mut cb_prepare = ContainerBuilder::new(&prepare_container_name).context( FailedContainerBuilderCreationSnafu { - name: Container::Prepare.to_string(), + name: &prepare_container_name, }, )?; // druid container builder - let mut cb_druid = ContainerBuilder::new(&Container::Druid.to_string()).context( + let druid_container_name = Container::Druid.to_string(); + let mut cb_druid = ContainerBuilder::new(&druid_container_name).context( FailedContainerBuilderCreationSnafu { - name: Container::Druid.to_string(), + name: &druid_container_name, }, )?; // init pod builder @@ -744,7 +746,7 @@ fn build_rolegroup_statefulset( { prepare_container_command.push(product_logging::framework::capture_shell_output( LOG_DIR, - &Container::Prepare.to_string(), + &prepare_container_name, log_config, )); }