From a2ff728eca493e00e9567b4f64be72b52c68156a Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Wed, 18 Sep 2024 16:11:44 +0200 Subject: [PATCH 01/12] Adding new preset: cockroachdb_cets. Adding new database: CockroachDB. It is set up in a docker container when starting the cockroachdb_cets preset. In the cockroachdb.sql file, all SERIAL ids are being replaced by UUID's. It still requires some testing, but it is the suggested by the creators of CockroachDB, as when the database is distributed, the randomly generated UUID type performs much better than SERIAL. The `family` column in `vcard_search` table is temporarly renamed to "family", beacuse `family` is a reserved keyword in CockroachDB. Because of that change vcard is currently not working properly and requires further investigation. Current solution is only to ensure that the database is created during schema migration. --- big_tests/test.config | 30 ++ priv/cockroachdb.sql | 502 +++++++++++++++++++ tools/db-versions.sh | 4 + tools/db_configs/cockroachdb/create_user.sql | 3 + tools/docker-setup-cockroachdb.sh | 25 + tools/setup-db.sh | 38 +- 6 files changed, 601 insertions(+), 1 deletion(-) create mode 100644 priv/cockroachdb.sql create mode 100644 tools/db_configs/cockroachdb/create_user.sql create mode 100644 tools/docker-setup-cockroachdb.sh diff --git a/big_tests/test.config b/big_tests/test.config index bc149cf45af..2b0213de6bb 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -265,6 +265,36 @@ connection.password = \"mongooseim_secret\" connection.tls.required = true connection.tls.cacertfile = \"priv/ssl/cacert.pem\" + connection.tls.server_name_indication.enabled = false"}, + {service_domain_db, ""}, + {mod_vcard, " backend = \"rdbms\" + host = \"vjud.@HOST@\"\n"}, + {mod_roster, " backend = \"rdbms\"\n"}]}, + {cockroachdb_cets, + [{dbs, [redis, cockroachdb]}, + {sm_backend, "\"cets\""}, + {bosh_backend, "\"cets\""}, + {component_backend, "\"cets\""}, + {s2s_backend, "\"cets\""}, + 
{stream_management_backend, cets}, + {auth_anonymous_backend, cets}, + {auth_method, "rdbms"}, + {internal_databases, "[internal_databases.cets] + cluster_name = \"{{cluster_name}}\""}, + {outgoing_pools, "[outgoing_pools.redis.global_distrib] + scope = \"global\" + workers = 10 +[outgoing_pools.rdbms.default] + scope = \"global\" + workers = 5 + connection.driver = \"pgsql\" + connection.host = \"localhost\" + connection.port = 26257 + connection.database = \"mongooseim\" + connection.username = \"mongooseim\" + connection.password = \"mongooseim_secret\" + connection.tls.required = true + connection.tls.cacertfile = \"priv/ssl/cacert.pem\" connection.tls.server_name_indication.enabled = false"}, {service_domain_db, ""}, {mod_vcard, " backend = \"rdbms\" diff --git a/priv/cockroachdb.sql b/priv/cockroachdb.sql new file mode 100644 index 00000000000..fd9f770bcd9 --- /dev/null +++ b/priv/cockroachdb.sql @@ -0,0 +1,502 @@ +-- +-- mongooseim, Copyright (C) 2024 Erlang Solutions +-- +-- This program is free software; you can redistribute it and/or +-- modify it under the terms of the GNU General Public License as +-- published by the Free Software Foundation; either version 2 of the +-- License, or (at your option) any later version. +-- +-- This program is distributed in the hope that it will be useful, +-- but WITHOUT ANY WARRANTY; without even the implied warranty of +-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +-- General Public License for more details. 
+-- +-- You should have received a copy of the GNU General Public License +-- along with this program; if not, write to the Free Software +-- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +-- +USE mongooseim; + +CREATE TYPE test_enum_char AS ENUM('A','B', 'C'); +CREATE TABLE test_types( + unicode text, + unicode250 varchar(250), + binary_data_8k bytea, -- byte a has 1 GB limit + binary_data_65k bytea, + binary_data_16m bytea, + ascii_char character(1), + ascii_string varchar(250), + int32 integer, + int64 bigint, + int8 smallint, -- has no tinyint, so the next one is 2-bytes smallint + enum_char test_enum_char, + bool_flag boolean +); + +CREATE TABLE users ( + username varchar(250), + server varchar(250), + "password" text NOT NULL, + pass_details text, + created_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (server, username) +); + + +CREATE TABLE last ( + server varchar(250), + username varchar(250), + seconds integer NOT NULL, + state text NOT NULL, + PRIMARY KEY (server, username) +); + +CREATE INDEX i_last_server_seconds ON last USING btree (server, seconds); + +CREATE TABLE rosterusers ( + server varchar(250) NOT NULL, + username varchar(250) NOT NULL, + jid text NOT NULL, + nick text NOT NULL, + subscription character(1) NOT NULL, + ask character(1) NOT NULL, + askmessage text NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (server, username, jid) +); + +CREATE TABLE rostergroups ( + server varchar(250) NOT NULL, + username varchar(250) NOT NULL, + jid text NOT NULL, + grp text NOT NULL, + PRIMARY KEY (server, username, jid, grp) +); + +CREATE TABLE roster_version ( + server varchar(250), + username varchar(250), + version text NOT NULL, + PRIMARY KEY (server, username) +); + +CREATE TABLE vcard ( + username varchar(150), + server varchar(100), + vcard text NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (server, username) +); + +CREATE TABLE vcard_search ( + username 
varchar(150) NOT NULL, + lusername varchar(100), + server varchar(250), + fn text NOT NULL, + lfn text NOT NULL, + "family" text NOT NULL, + lfamily text NOT NULL, + given text NOT NULL, + lgiven text NOT NULL, + middle text NOT NULL, + lmiddle text NOT NULL, + nickname text NOT NULL, + lnickname text NOT NULL, + bday text NOT NULL, + lbday text NOT NULL, + ctry text NOT NULL, + lctry text NOT NULL, + locality text NOT NULL, + llocality text NOT NULL, + email text NOT NULL, + lemail text NOT NULL, + orgname text NOT NULL, + lorgname text NOT NULL, + orgunit text NOT NULL, + lorgunit text NOT NULL, + PRIMARY KEY (server, lusername) +); + +CREATE INDEX i_vcard_search_lfn ON vcard_search(lfn); +CREATE INDEX i_vcard_search_lfamily ON vcard_search(lfamily); +CREATE INDEX i_vcard_search_lgiven ON vcard_search(lgiven); +CREATE INDEX i_vcard_search_lmiddle ON vcard_search(lmiddle); +CREATE INDEX i_vcard_search_lnickname ON vcard_search(lnickname); +CREATE INDEX i_vcard_search_lbday ON vcard_search(lbday); +CREATE INDEX i_vcard_search_lctry ON vcard_search(lctry); +CREATE INDEX i_vcard_search_llocality ON vcard_search(llocality); +CREATE INDEX i_vcard_search_lemail ON vcard_search(lemail); +CREATE INDEX i_vcard_search_lorgname ON vcard_search(lorgname); +CREATE INDEX i_vcard_search_lorgunit ON vcard_search(lorgunit); + +CREATE TABLE privacy_default_list ( + server varchar(250), + username varchar(250), + name text NOT NULL, + PRIMARY KEY (server, username) +); + +CREATE TABLE privacy_list ( + server varchar(250) NOT NULL, + username varchar(250) NOT NULL, + name text NOT NULL, + id UUID UNIQUE DEFAULT gen_random_uuid(), + created_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY (server, username, name) +); + +CREATE TABLE privacy_list_data ( + id UUID REFERENCES privacy_list(id) ON DELETE CASCADE, + t character(1) NOT NULL, + value text NOT NULL, + action character(1) NOT NULL, + ord INT NOT NULL, + match_all boolean NOT NULL, + match_iq boolean NOT NULL, + match_message 
boolean NOT NULL, + match_presence_in boolean NOT NULL, + match_presence_out boolean NOT NULL, + PRIMARY KEY (id, ord) +); + +CREATE TABLE private_storage ( + server varchar(250) NOT NULL, + username varchar(250) NOT NULL, + namespace varchar(250) NOT NULL, + data text NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT now(), + PRIMARY KEY(server, username, namespace) +); + +CREATE TYPE mam_behaviour AS ENUM('A', 'N', 'R'); +CREATE TYPE mam_direction AS ENUM('I','O'); + +CREATE TABLE mam_message( + -- Message UID (64 bits) + -- A server-assigned UID that MUST be unique within the archive. + id BIGINT NOT NULL, + user_id INT NOT NULL, + -- FromJID used to form a message without looking into stanza. + -- This value will be send to the client "as is". + from_jid varchar(250) NOT NULL, + -- The remote JID that the stanza is to (for an outgoing message) or from (for an incoming message). + -- This field is for sorting and filtering. + remote_bare_jid varchar(250) NOT NULL, + remote_resource varchar(250) NOT NULL, + -- I - incoming, remote_jid is a value from From. + -- O - outgoing, remote_jid is a value from To. + -- Has no meaning for MUC-rooms. + direction mam_direction NOT NULL, + -- Term-encoded message packet + message bytea NOT NULL, + search_body text, + origin_id varchar, + is_groupchat boolean NOT NULL, + PRIMARY KEY(user_id, id) +); +CREATE INDEX i_mam_message_username_jid_id + ON mam_message + USING BTREE + (user_id, remote_bare_jid, id); +CREATE INDEX i_mam_message_username_jid_origin_id + ON mam_message + USING BTREE + (user_id, remote_bare_jid, origin_id); + +CREATE TABLE mam_config( + user_id INT NOT NULL, + -- If empty, than it is a default behaviour. 
+ remote_jid varchar(250) NOT NULL, + -- A - always archive; + -- N - never archive; + -- R - roster (only for remote_jid == "") + behaviour mam_behaviour NOT NULL, + PRIMARY KEY(user_id, remote_jid) +); + +CREATE TABLE mam_server_user( + id UUID UNIQUE PRIMARY KEY DEFAULT gen_random_uuid(), + server varchar(250) NOT NULL, + user_name varchar(250) NOT NULL +); +CREATE UNIQUE INDEX i_mam_server_user_name + ON mam_server_user + USING BTREE + (server, user_name); + + +CREATE TABLE mam_muc_message( + -- Message UID + -- A server-assigned UID that MUST be unique within the archive. + id BIGINT NOT NULL, + room_id INT NOT NULL, + sender_id INT NOT NULL, + -- A nick of the message's originator + nick_name varchar(250) NOT NULL, + -- Term-encoded message packet + message bytea NOT NULL, + search_body text, + origin_id varchar, + PRIMARY KEY (room_id, id) +); + +CREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message USING BTREE (sender_id); +CREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message USING BTREE (room_id, sender_id, origin_id); + +CREATE TABLE offline_message( + id UUID UNIQUE PRIMARY Key DEFAULT gen_random_uuid(), + timestamp BIGINT NOT NULL, + expire BIGINT, + server varchar(250) NOT NULL, + username varchar(250) NOT NULL, + from_jid varchar(250) NOT NULL, + packet text NOT NULL, + permanent_fields bytea +); +CREATE INDEX i_offline_message + ON offline_message + USING BTREE + (server, username, id); + +CREATE TABLE auth_token( + owner TEXT NOT NULL PRIMARY KEY, + seq_no BIGINT NOT NULL +); + +CREATE TABLE muc_light_rooms( + id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + version VARCHAR(20) NOT NULL, + PRIMARY KEY (lserver, luser) +); + +CREATE TABLE muc_rooms( + id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + muc_host VARCHAR(250) NOT NULL, + room_name VARCHAR(250) NOT NULL, + options JSON NOT NULL, + PRIMARY KEY (muc_host, room_name) +); + +CREATE TABLE 
muc_room_aff( + room_id UUID NOT NULL REFERENCES muc_rooms(id), + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + resource VARCHAR(250) NOT NULL, + aff SMALLINT NOT NULL +); + +CREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id); + +CREATE TABLE muc_registered( + muc_host VARCHAR(250) NOT NULL, + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + nick VARCHAR(250) NOT NULL, + PRIMARY KEY (muc_host, luser, lserver) +); + +CREATE TABLE muc_light_occupants( + room_id UUID NOT NULL REFERENCES muc_light_rooms(id), + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + aff SMALLINT NOT NULL +); + +CREATE INDEX i_muc_light_occupants_id ON muc_light_occupants (room_id); +CREATE INDEX i_muc_light_occupants_us ON muc_light_occupants (lserver, luser); + +CREATE TABLE muc_light_config( + room_id UUID NOT NULL REFERENCES muc_light_rooms(id), + opt VARCHAR(100) NOT NULL, + val VARCHAR(250) NOT NULL +); + +CREATE INDEX i_muc_light_config ON muc_light_config (room_id); + +CREATE TABLE muc_light_blocking( + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + what SMALLINT NOT NULL, + who VARCHAR(500) NOT NULL +); + +CREATE INDEX i_muc_light_blocking_su ON muc_light_blocking (lserver, luser); + +CREATE TABLE inbox ( + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + remote_bare_jid VARCHAR(250) NOT NULL, + msg_id VARCHAR(250), + box VARCHAR(64) NOT NULL DEFAULT 'inbox', + content BYTEA NOT NULL, + timestamp BIGINT NOT NULL, + muted_until BIGINT DEFAULT 0, + unread_count INT NOT NULL, + PRIMARY KEY(lserver, luser, remote_bare_jid)); + +CREATE INDEX i_inbox_timestamp ON inbox USING BTREE(lserver, luser, timestamp); +CREATE INDEX i_inbox_us_box ON inbox USING BTREE(lserver, luser, box); +CREATE INDEX i_inbox_box ON inbox (box) WHERE (box = 'bin'); + +CREATE TABLE pubsub_nodes ( + nidx UUID PRIMARY KEY DEFAULT gen_random_uuid(), + p_key VARCHAR(250) NOT NULL, + name VARCHAR(250) NOT NULL, + type VARCHAR(250) NOT 
NULL, + owners JSON NOT NULL, + options JSON NOT NULL +); + +CREATE UNIQUE INDEX i_pubsub_nodes_key_name ON pubsub_nodes USING btree (p_key, name); + +CREATE TABLE pubsub_node_collections ( + name VARCHAR(250) NOT NULL, + parent_name VARCHAR(250) NOT NULL, + PRIMARY KEY(name, parent_name) +); + +CREATE TABLE pubsub_affiliations ( + nidx BIGINT NOT NULL, + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + aff SMALLINT NOT NULL, + PRIMARY KEY(luser, lserver, nidx) +); + +CREATE INDEX i_pubsub_affiliations_nidx ON pubsub_affiliations(nidx); + +CREATE TABLE pubsub_items ( + nidx BIGINT NOT NULL, + itemid VARCHAR(250) NOT NULL, + created_luser VARCHAR(250) NOT NULL, + created_lserver VARCHAR(250) NOT NULL, + created_at BIGINT NOT NULL, + modified_luser VARCHAR(250) NOT NULL, + modified_lserver VARCHAR(250) NOT NULL, + modified_lresource VARCHAR(250) NOT NULL, + modified_at BIGINT NOT NULL, + publisher TEXT, + payload TEXT NOT NULL, + PRIMARY KEY(nidx, itemid) +); + +CREATE TABLE pubsub_last_item ( + nidx BIGINT NOT NULL, + itemid VARCHAR(250) NOT NULL, + created_luser VARCHAR(250) NOT NULL, + created_lserver VARCHAR(250) NOT NULL, + created_at BIGINT NOT NULL, + payload TEXT NOT NULL, + PRIMARY KEY (nidx) +); + +-- we skip luser and lserver in this one as this is little chance (even impossible?) 
+-- to have itemid duplication for distinct users +CREATE INDEX i_pubsub_items_lus_nidx ON pubsub_items(created_luser, created_lserver, nidx); +CREATE INDEX i_pubsub_items_nidx ON pubsub_items(nidx); + + +CREATE TABLE pubsub_subscriptions ( + nidx BIGINT NOT NULL, + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + lresource VARCHAR(250) NOT NULL, + type SMALLINT NOT NULL, + sub_id VARCHAR(125) NOT NULL, + options JSON NOT NULL +); + +CREATE INDEX i_pubsub_subscriptions_lus_nidx ON pubsub_subscriptions(luser, lserver, nidx); +CREATE INDEX i_pubsub_subscriptions_nidx ON pubsub_subscriptions(nidx); + +CREATE TABLE event_pusher_push_subscription ( + owner_jid VARCHAR(250), + node VARCHAR(250), + pubsub_jid VARCHAR(250), + form JSON NOT NULL, + created_at BIGINT NOT NULL, + PRIMARY KEY(owner_jid, node, pubsub_jid) + ); + +CREATE INDEX i_event_pusher_push_subscription ON event_pusher_push_subscription(owner_jid); + +CREATE TABLE mongoose_cluster_id ( + k varchar(50) PRIMARY KEY, + v text +); + +-- chat marker types: +-- 'R' - received +-- 'D' - displayed +-- 'A' - acknowledged +CREATE TYPE chat_marker_type AS ENUM('R', 'D', 'A'); + +CREATE TABLE smart_markers ( + lserver VARCHAR(250) NOT NULL, + luser VARCHAR(250) NOT NULL, + to_jid VARCHAR(250) NOT NULL, + thread VARCHAR(250) NOT NULL, + type chat_marker_type NOT NULL, + msg_id VARCHAR(250) NOT NULL, + timestamp BIGINT NOT NULL, + PRIMARY KEY(lserver, luser, to_jid, thread, type) +); + +CREATE INDEX i_smart_markers ON smart_markers(to_jid, thread); + + +CREATE TABLE offline_markers ( + jid VARCHAR(250) NOT NULL, + thread VARCHAR(250) NOT NULL, + room VARCHAR(250) NOT NULL, + timestamp BIGINT NOT NULL, + PRIMARY KEY(jid, thread, room) +); + +CREATE INDEX i_offline_markers ON offline_markers(jid); + +CREATE TABLE domain_admins( + domain VARCHAR(250) NOT NULL, + pass_details TEXT NOT NULL, + PRIMARY KEY(domain) +); + +-- Mapping from domain hostname to host_type. +-- Column id is used for ordering only. 
+CREATE TABLE domain_settings ( + id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + domain VARCHAR(250) NOT NULL, + host_type VARCHAR(250) NOT NULL, + status SMALLINT NOT NULL DEFAULT 1, + PRIMARY KEY(domain) +); + +-- A new record is inserted into domain_events, each time +-- domain_settings table is updated: i.e. when a domain is removed, +-- inserted, enabled or disabled. +-- Column id is used for ordering and not related to domain_settings.id. +CREATE TABLE domain_events ( + id UUID NOT NULL DEFAULT gen_random_uuid(), + domain VARCHAR(250) NOT NULL, + PRIMARY KEY(id) +); +CREATE INDEX i_domain_events_domain ON domain_events(domain); + +CREATE TABLE discovery_nodes ( + cluster_name varchar(250) NOT NULL, + node_name varchar(250) NOT NULL, + node_num INT NOT NULL, + address varchar(250) NOT NULL DEFAULT '', -- empty means we should ask DNS + updated_timestamp BIGINT NOT NULL, -- in seconds + PRIMARY KEY (node_name) +); +CREATE UNIQUE INDEX i_discovery_nodes_node_num ON discovery_nodes USING BTREE(cluster_name, node_num); + +CREATE TABLE caps ( + node varchar(250) NOT NULL, + sub_node varchar(250) NOT NULL, + features text NOT NULL, + PRIMARY KEY (node, sub_node) +); diff --git a/tools/db-versions.sh b/tools/db-versions.sh index 288a3d8f7cc..f04b8fe04b6 100644 --- a/tools/db-versions.sh +++ b/tools/db-versions.sh @@ -7,6 +7,8 @@ MYSQL_VERSION_DEFAULT="8.0.30" PGSQL_VERSION_DEFAULT=latest +COCKROACHDB_VERSION_DEFAULT="latest" + MSSQL_VERSION_DEFAULT="2022-CU12-ubuntu-22.04" LDAP_VERSION_DEFAULT="1.5.0" @@ -27,6 +29,8 @@ MYSQL_VERSION=${MYSQL_VERSION:-$MYSQL_VERSION_DEFAULT} PGSQL_VERSION=${PGSQL_VERSION:-$PGSQL_VERSION_DEFAULT} +COCKROACHDB_VERSION=${COCKROACHDB_VERSION:-$COCKROACHDB_VERSION_DEFAULT} + MSSQL_VERSION=${MSSQL_VERSION:-$MSSQL_VERSION_DEFAULT} LDAP_VERSION=${LDAP_VERSION:-$LDAP_VERSION_DEFAULT} diff --git a/tools/db_configs/cockroachdb/create_user.sql b/tools/db_configs/cockroachdb/create_user.sql new file mode 100644 index 00000000000..2598c4af25e 
--- /dev/null +++ b/tools/db_configs/cockroachdb/create_user.sql @@ -0,0 +1,3 @@ +CREATE USER mongooseim WITH PASSWORD 'mongooseim_secret'; +GRANT admin TO mongooseim; +GRANT ALL ON DATABASE mongooseim to mongooseim; diff --git a/tools/docker-setup-cockroachdb.sh b/tools/docker-setup-cockroachdb.sh new file mode 100644 index 00000000000..f402a155053 --- /dev/null +++ b/tools/docker-setup-cockroachdb.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +certs_dir="certs" +lifetime="--lifetime=1h" +default_listen_addr_host="127.0.0.1" +advertise_addr_host=$default_listen_addr_host + +# Copying certificates to CockroachDB's certs_dir +cp -r /tmp /cockroach/$certs_dir + +# Changing permissions +chmod 777 /cockroach/$certs_dir +chmod -R 740 /cockroach/$certs_dir + +# Generating node certificate +/cockroach/cockroach cert create-node --certs-dir="$certs_dir" \ + --ca-key="$certs_dir"/ca.key "$advertise_addr_host" "$default_listen_addr_host" $lifetime + +# Generating root certificate +/cockroach/cockroach cert create-client --certs-dir="$certs_dir" \ + --ca-key="$certs_dir"/ca.key $lifetime root + +# Starting original entrypoint, that launches CockroachDB +echo "Start original entrypoint /cockroach/cockroach.sh start-single-node" +eval "/cockroach/cockroach.sh start-single-node" diff --git a/tools/setup-db.sh b/tools/setup-db.sh index 16b2f916ae4..e7cbfb4c87d 100755 --- a/tools/setup-db.sh +++ b/tools/setup-db.sh @@ -2,7 +2,6 @@ # Environment variable DB is used by this script. # If DB is undefined, than this script does nothing. - # Docker for Mac should be used on Mac (not docker-machine!) 
# https://store.docker.com/editions/community/docker-ce-desktop-mac @@ -105,6 +104,43 @@ function setup_db(){ mkdir -p ${PGSQL_ODBC_CERT_DIR} cp ${SSLDIR}/ca/cacert.pem ${PGSQL_ODBC_CERT_DIR}/root.crt + elif [ "$db" = 'cockroachdb' ]; then + NAME=$(db_name cockroachdb) + COCKROACHDB_PORT=${COCKROACHDB_PORT:-26257} + + echo "Configuring CockroachDB with SSL" + $DOCKER rm -v -f $NAME || echo "Skip removing previous container" + + COCKROACH_SQL=$(cat32 priv/cockroachdb.sql) + COCKROACH_USER_SQL=$(cat32 tools/db_configs/cockroachdb/create_user.sql) + COCKROACH_SETUP=$(cat32 tools/docker-setup-cockroachdb.sh) + + DB_CACERT=$(cat32 tools/ssl/ca/cacert.pem) + DB_CAKEY=$(cat32 tools/ssl/ca/cakey.pem) + MIM_CERT=$(cat32 tools/ssl/mongooseim/cert.pem) + MIM_KEY=$(cat32 tools/ssl/mongooseim/key.pem) + + IMAGE=cockroachdb/cockroach:$COCKROACHDB_VERSION + $DOCKER run -d --name=$NAME \ + -p $COCKROACHDB_PORT:26257 \ + -e COCKROACH_DATABASE=mongooseim \ + -e OLD_ENTRYPOINT="chmod 777 /start.sh && /start.sh" \ + -e ENV_FILE_SETUP_PATH="/start.sh" \ + -e ENV_FILE_SETUP_DATA="$COCKROACH_SETUP" \ + -e ENV_FILE_SQL_PATH="/docker-entrypoint-initdb.d/init.sql" \ + -e ENV_FILE_SQL_DATA="$COCKROACH_SQL" \ + -e ENV_FILE_USER_PATH="/docker-entrypoint-initdb.d/create_user.sql" \ + -e ENV_FILE_USER_DATA="$COCKROACH_USER_SQL" \ + -e ENV_FILE_CACERT_PATH="/tmp/ca.key" \ + -e ENV_FILE_CACERT_DATA="$DB_CAKEY" \ + -e ENV_FILE_CAKEY_PATH="/tmp/ca.crt" \ + -e ENV_FILE_CAKEY_DATA="$DB_CACERT" \ + -e ENV_FILE_CERT_PATH="/tmp/client.mongooseim.crt" \ + -e ENV_FILE_CERT_DATA="$MIM_CERT" \ + -e ENV_FILE_KEY_PATH="/tmp/client.mongooseim.key" \ + -e ENV_FILE_KEY_DATA="$MIM_KEY" \ + --entrypoint=/bin/sh $IMAGE -c "$ENTRYPOINT" + elif [ "$db" = 'cassandra' ]; then NAME=$(db_name cassandra) PROXY_NAME=$(db_name cassandra-proxy) From b778ca8bac953d48a22cb3426328d3f0bd87ecd3 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Wed, 18 Sep 2024 16:23:53 +0200 Subject: [PATCH 02/12] Adding newly created 
cockroachdb_cets preset to circleci. --- .circleci/template.yml | 38 ++++++++++++++++++++++++++++++++- tools/circle-generate-config.sh | 10 +++++++++ tools/circle-wait-for-db.sh | 4 ++++ tools/db-versions.sh | 2 +- 4 files changed, 52 insertions(+), 2 deletions(-) diff --git a/.circleci/template.yml b/.circleci/template.yml index 6500651ce29..fe36b9dfd87 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -54,6 +54,26 @@ containers: - ENV_FILE_KEY_PATH: "/tmp/sql/fake_key.pem" - ENV_FILE_KEY_DATA: "__MIM_KEY__" entrypoint: *ENTRYPOINT + - &cockroachdb_container + image: cockroachdb/cockroach:__COCKROACHDB_VERSION__ + environment: + - COCKROACH_DATABASE: mongooseim + - OLD_ENTRYPOINT: "chmod 777 /start.sh && /start.sh" + - ENV_FILE_SETUP_PATH: "/start.sh" + - ENV_FILE_SETUP_DATA: "__COCKROACHDB_SETUP__" + - ENV_FILE_SQL_PATH: "/docker-entrypoint-initdb.d/init.sql" + - ENV_FILE_SQL_DATA: "__COCKROACHDB_SQL__" + - ENV_FILE_USER_PATH: "/docker-entrypoint-initdb.d/create_user.sql" + - ENV_FILE_USER_DATA: "__COCKROACHDB_USER_SQL__" + - ENV_FILE_CACERT_PATH: "/tmp/ca.key" + - ENV_FILE_CACERT_DATA: "__DB_CAKEY__" + - ENV_FILE_CAKEY_PATH: "/tmp/ca.crt" + - ENV_FILE_CAKEY_DATA: "__DB_CACERT__" + - ENV_FILE_CERT_PATH: "/tmp/client.mongooseim.crt" + - ENV_FILE_CERT_DATA: "__MIM_CERT__" + - ENV_FILE_KEY_PATH: "/tmp/client.mongooseim.key" + - ENV_FILE_KEY_DATA: "__MIM_KEY__" + entrypoint: *ENTRYPOINT - &ldap_container image: osixia/openldap:__LDAP_VERSION__ environment: @@ -185,6 +205,11 @@ executors: - image: *OTP27 - *redis_container - *pgsql_container + otp_27_cockroachdb_redis: + docker: + - image: *OTP27 + - *redis_container + - *cockroachdb_container otp_26_ldap_redis: docker: - image: *OTP26 @@ -552,7 +577,8 @@ jobs: preset: type: enum enum: [internal_mnesia, mysql_redis, odbc_mssql_mnesia, ldap_mnesia, - elasticsearch_and_cassandra_mnesia, pgsql_mnesia, pgsql_cets] + elasticsearch_and_cassandra_mnesia, pgsql_mnesia, pgsql_cets, + cockroachdb_cets] 
description: Preset to run default: internal_mnesia db: @@ -843,6 +869,15 @@ workflows: requires: - otp_27_docker filters: *all_tags + - big_tests_in_docker: + name: cockroachdb_cets_27 + executor: otp_27_cockroachdb_redis + context: mongooseim-org + preset: cockroachdb_cets + db: "mnesia cockroachdb cets" + requires: + - otp_27_docker + filters: *all_tags - big_tests_in_docker: name: pgsql_cets_27 executor: otp_27_pgsql_redis @@ -966,6 +1001,7 @@ workflows: - small_tests_27 - internal_mnesia_27 - pgsql_cets_27 + - cockroachdb_cets_27 - pgsql_mnesia_27 - mysql_redis_27 - mssql_mnesia_27 diff --git a/tools/circle-generate-config.sh b/tools/circle-generate-config.sh index 6b1c3d4a8f3..0fc88845f95 100755 --- a/tools/circle-generate-config.sh +++ b/tools/circle-generate-config.sh @@ -15,6 +15,10 @@ PGSQL_SQL=$(cat32 priv/pg.sql) PGSQL_HBA=$(cat32 tools/db_configs/pgsql/pg_hba.conf) PGSQL_SETUP=$(cat32 tools/docker-setup-postgres.sh) +COCKROACH_SQL=$(cat32 priv/cockroachdb.sql) +COCKROACH_USER_SQL=$(cat32 tools/db_configs/cockroachdb/create_user.sql) +COCKROACH_SETUP=$(cat32 tools/docker-setup-cockroachdb.sh) + MSSQL_SQL=$(cat32 priv/mssql2012.sql) MSSQL_SETUP=$(cat32 tools/docker-setup-mssql.sh) @@ -32,6 +36,7 @@ MIM_PRIV_KEY=$(cat32 tools/ssl/mongooseim/privkey.pem) MIM_DHSERVER=$(cat32 tools/ssl/mongooseim/dh_server.pem) INJECT_FILES=$(cat32 tools/inject-files.sh) CACERT=$(cat32 tools/ssl/ca/cacert.pem) +CAKEY=$(cat32 tools/ssl/ca/cakey.pem) CERTS_CACHE_KEY=$(cat certs_cache_key) @@ -47,6 +52,10 @@ sed -e "s/__MYSQL_CNF__/${MYSQL_CNF}/" \ -e "s/__PGSQL_HBA__/${PGSQL_HBA}/" \ -e "s/__PGSQL_SETUP__/${PGSQL_SETUP}/" \ -e "s/__PGSQL_VERSION__/${PGSQL_VERSION}/g" \ + -e "s/__COCKROACHDB_USER_SQL__/${COCKROACH_USER_SQL}/" \ + -e "s/__COCKROACHDB_SQL__/${COCKROACH_SQL}/" \ + -e "s/__COCKROACHDB_SETUP__/${COCKROACH_SETUP}/" \ + -e "s/__COCKROACHDB_VERSION__/${COCKROACHDB_VERSION}/" \ -e "s/__MSSQL_SQL__/${MSSQL_SQL}/" \ -e "s/__MSSQL_SETUP__/${MSSQL_SETUP}/" \ -e 
"s/__MSSQL_VERSION__/${MSSQL_VERSION}/" \ @@ -69,6 +78,7 @@ sed -e "s/__MYSQL_CNF__/${MYSQL_CNF}/" \ -e "s/__MIM_DHSERVER__/${MIM_DHSERVER}/" \ -e "s/__INJECT_FILES__/${INJECT_FILES}/" \ -e "s/__DB_CACERT__/${CACERT}/" \ + -e "s/__DB_CAKEY__/${CAKEY}/" \ -e "s/__PYTHON2_BASE32_DEC__/${PYTHON2_BASE32_DEC}/" \ -e "s/__PYTHON3_BASE32_DEC__/${PYTHON3_BASE32_DEC}/" \ -e "s/__CERTS_CACHE_KEY__/${CERTS_CACHE_KEY}/" \ diff --git a/tools/circle-wait-for-db.sh b/tools/circle-wait-for-db.sh index 5c52e4d79a8..025444bc5b9 100755 --- a/tools/circle-wait-for-db.sh +++ b/tools/circle-wait-for-db.sh @@ -15,6 +15,10 @@ function wait_for_db { ./tools/wait-for-it.sh -p 5432 ;; + cockroachdb) + ./tools/wait-for-it.sh -p 26257 + ;; + mssql) ./tools/wait-for-it.sh -p 1433 ./tools/wait-for-it.sh -p 1434 # SCHEMA_READY_PORT diff --git a/tools/db-versions.sh b/tools/db-versions.sh index f04b8fe04b6..e8a41b2802d 100644 --- a/tools/db-versions.sh +++ b/tools/db-versions.sh @@ -7,7 +7,7 @@ MYSQL_VERSION_DEFAULT="8.0.30" PGSQL_VERSION_DEFAULT=latest -COCKROACHDB_VERSION_DEFAULT="latest" +COCKROACHDB_VERSION_DEFAULT=latest MSSQL_VERSION_DEFAULT="2022-CU12-ubuntu-22.04" From fb3f75ed08c438c5ffce23972af8e5dcf11692b7 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Wed, 18 Sep 2024 18:13:54 +0200 Subject: [PATCH 03/12] Reverting changes in cockroachdb.sql. 
Changing UUID to SERIAL --- priv/cockroachdb.sql | 47 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/priv/cockroachdb.sql b/priv/cockroachdb.sql index fd9f770bcd9..4ff419e0540 100644 --- a/priv/cockroachdb.sql +++ b/priv/cockroachdb.sql @@ -15,6 +15,7 @@ -- along with this program; if not, write to the Free Software -- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -- + USE mongooseim; CREATE TYPE test_enum_char AS ENUM('A','B', 'C'); @@ -140,13 +141,13 @@ CREATE TABLE privacy_list ( server varchar(250) NOT NULL, username varchar(250) NOT NULL, name text NOT NULL, - id UUID UNIQUE DEFAULT gen_random_uuid(), + id SERIAL UNIQUE, created_at TIMESTAMP NOT NULL DEFAULT now(), PRIMARY KEY (server, username, name) ); CREATE TABLE privacy_list_data ( - id UUID REFERENCES privacy_list(id) ON DELETE CASCADE, + id bigint REFERENCES privacy_list(id) ON DELETE CASCADE, t character(1) NOT NULL, value text NOT NULL, action character(1) NOT NULL, @@ -215,7 +216,7 @@ CREATE TABLE mam_config( ); CREATE TABLE mam_server_user( - id UUID UNIQUE PRIMARY KEY DEFAULT gen_random_uuid(), + id SERIAL UNIQUE PRIMARY KEY, server varchar(250) NOT NULL, user_name varchar(250) NOT NULL ); @@ -244,7 +245,7 @@ CREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message USING BTREE (sender_ CREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message USING BTREE (room_id, sender_id, origin_id); CREATE TABLE offline_message( - id UUID UNIQUE PRIMARY Key DEFAULT gen_random_uuid(), + id SERIAL UNIQUE PRIMARY Key, timestamp BIGINT NOT NULL, expire BIGINT, server varchar(250) NOT NULL, @@ -264,27 +265,27 @@ CREATE TABLE auth_token( ); CREATE TABLE muc_light_rooms( - id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), - luser VARCHAR(250) NOT NULL, - lserver VARCHAR(250) NOT NULL, - version VARCHAR(20) NOT NULL, + id BIGSERIAL NOT NULL UNIQUE, + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT 
NULL, + version VARCHAR(20) NOT NULL, PRIMARY KEY (lserver, luser) ); CREATE TABLE muc_rooms( - id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + id BIGSERIAL NOT NULL UNIQUE, muc_host VARCHAR(250) NOT NULL, - room_name VARCHAR(250) NOT NULL, + room_name VARCHAR(250) NOT NULL, options JSON NOT NULL, PRIMARY KEY (muc_host, room_name) ); CREATE TABLE muc_room_aff( - room_id UUID NOT NULL REFERENCES muc_rooms(id), - luser VARCHAR(250) NOT NULL, - lserver VARCHAR(250) NOT NULL, - resource VARCHAR(250) NOT NULL, - aff SMALLINT NOT NULL + room_id BIGINT NOT NULL REFERENCES muc_rooms(id), + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + resource VARCHAR(250) NOT NULL, + aff SMALLINT NOT NULL ); CREATE INDEX i_muc_room_aff_id ON muc_room_aff (room_id); @@ -298,17 +299,17 @@ CREATE TABLE muc_registered( ); CREATE TABLE muc_light_occupants( - room_id UUID NOT NULL REFERENCES muc_light_rooms(id), - luser VARCHAR(250) NOT NULL, - lserver VARCHAR(250) NOT NULL, - aff SMALLINT NOT NULL + room_id BIGINT NOT NULL REFERENCES muc_light_rooms(id), + luser VARCHAR(250) NOT NULL, + lserver VARCHAR(250) NOT NULL, + aff SMALLINT NOT NULL ); CREATE INDEX i_muc_light_occupants_id ON muc_light_occupants (room_id); CREATE INDEX i_muc_light_occupants_us ON muc_light_occupants (lserver, luser); CREATE TABLE muc_light_config( - room_id UUID NOT NULL REFERENCES muc_light_rooms(id), + room_id BIGINT NOT NULL REFERENCES muc_light_rooms(id), opt VARCHAR(100) NOT NULL, val VARCHAR(250) NOT NULL ); @@ -341,7 +342,7 @@ CREATE INDEX i_inbox_us_box ON inbox USING BTREE(lserver, luser, box); CREATE INDEX i_inbox_box ON inbox (box) WHERE (box = 'bin'); CREATE TABLE pubsub_nodes ( - nidx UUID PRIMARY KEY DEFAULT gen_random_uuid(), + nidx BIGSERIAL PRIMARY KEY, p_key VARCHAR(250) NOT NULL, name VARCHAR(250) NOT NULL, type VARCHAR(250) NOT NULL, @@ -466,7 +467,7 @@ CREATE TABLE domain_admins( -- Mapping from domain hostname to host_type. -- Column id is used for ordering only. 
CREATE TABLE domain_settings ( - id UUID NOT NULL UNIQUE DEFAULT gen_random_uuid(), + id BIGSERIAL NOT NULL UNIQUE, domain VARCHAR(250) NOT NULL, host_type VARCHAR(250) NOT NULL, status SMALLINT NOT NULL DEFAULT 1, @@ -478,7 +479,7 @@ CREATE TABLE domain_settings ( -- inserted, enabled or disabled. -- Column id is used for ordering and not related to domain_settings.id. CREATE TABLE domain_events ( - id UUID NOT NULL DEFAULT gen_random_uuid(), + id BIGSERIAL NOT NULL, domain VARCHAR(250) NOT NULL, PRIMARY KEY(id) ); From d99d3a195d1d51549deb8ce6379ee258856b51b4 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Thu, 19 Sep 2024 13:58:49 +0200 Subject: [PATCH 04/12] Extending minimal certificate validity, because previous 1 hour was not enaugh --- Makefile | 2 +- tools/docker-setup-cockroachdb.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4398d22f423..83f8e61e8e7 100644 --- a/Makefile +++ b/Makefile @@ -59,7 +59,7 @@ $(DEVNODES): certs configure.out rel/vars-toml.config maybe_clean_certs: if [ "$$SKIP_CERT_BUILD" != 1 ]; then \ - if ! openssl x509 -checkend 3600 -noout -in tools/ssl/ca/cacert.pem ; then \ + if ! 
openssl x509 -checkend 36000 -noout -in tools/ssl/ca/cacert.pem ; then \ cd tools/ssl && make clean_certs; \ fi \ fi diff --git a/tools/docker-setup-cockroachdb.sh b/tools/docker-setup-cockroachdb.sh index f402a155053..9f1f3cd6454 100644 --- a/tools/docker-setup-cockroachdb.sh +++ b/tools/docker-setup-cockroachdb.sh @@ -1,7 +1,7 @@ #!/bin/bash certs_dir="certs" -lifetime="--lifetime=1h" +lifetime="--lifetime=10h" default_listen_addr_host="127.0.0.1" advertise_addr_host=$default_listen_addr_host From 8ee72822b948e9432e4121b1d65836b1ecaf1aca Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Thu, 19 Sep 2024 13:59:30 +0200 Subject: [PATCH 05/12] Modifying family field in mod_vcard_rdbms, to make it call "family", because it is a keyword in cockroachDB --- src/vcard/mod_vcard_rdbms.erl | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/vcard/mod_vcard_rdbms.erl b/src/vcard/mod_vcard_rdbms.erl index 033d22913c7..2b10e3e0d78 100644 --- a/src/vcard/mod_vcard_rdbms.erl +++ b/src/vcard/mod_vcard_rdbms.erl @@ -347,17 +347,17 @@ column_to_id(<<"lorgunit">>) -> "U". search_columns() -> [<<"username">>, - <<"fn">>, <<"lfn">>, - <<"family">>, <<"lfamily">>, - <<"given">>, <<"lgiven">>, - <<"middle">>, <<"lmiddle">>, - <<"nickname">>, <<"lnickname">>, - <<"bday">>, <<"lbday">>, - <<"ctry">>, <<"lctry">>, - <<"locality">>, <<"llocality">>, - <<"email">>, <<"lemail">>, - <<"orgname">>, <<"lorgname">>, - <<"orgunit">>, <<"lorgunit">>]. + <<"fn">>, <<"lfn">>, + <<"\"family\"">>, <<"lfamily">>, + <<"given">>, <<"lgiven">>, + <<"middle">>, <<"lmiddle">>, + <<"nickname">>, <<"lnickname">>, + <<"bday">>, <<"lbday">>, + <<"ctry">>, <<"lctry">>, + <<"locality">>, <<"llocality">>, + <<"email">>, <<"lemail">>, + <<"orgname">>, <<"lorgname">>, + <<"orgunit">>, <<"lorgunit">>].
search_args(User, Search) -> [User, From f8ed316fe212342fee8484cf34ef80ba5fcb6787 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Tue, 1 Oct 2024 12:47:11 +0200 Subject: [PATCH 06/12] Modifying cockroachDB schema. Swapping all serial data types according to cockroachDB documentation --- priv/cockroachdb.sql | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/priv/cockroachdb.sql b/priv/cockroachdb.sql index 4ff419e0540..fcb99c38dfe 100644 --- a/priv/cockroachdb.sql +++ b/priv/cockroachdb.sql @@ -137,11 +137,14 @@ CREATE TABLE privacy_default_list ( PRIMARY KEY (server, username) ); + +CREATE SEQUENCE privacy_list_id_sequence; + CREATE TABLE privacy_list ( server varchar(250) NOT NULL, username varchar(250) NOT NULL, name text NOT NULL, - id SERIAL UNIQUE, + id INT UNIQUE DEFAULT nextval('privacy_list_id_sequence'), created_at TIMESTAMP NOT NULL DEFAULT now(), PRIMARY KEY (server, username, name) ); @@ -215,8 +218,11 @@ CREATE TABLE mam_config( PRIMARY KEY(user_id, remote_jid) ); + +CREATE SEQUENCE mam_server_user_id; + CREATE TABLE mam_server_user( - id SERIAL UNIQUE PRIMARY KEY, + id INT UNIQUE PRIMARY KEY DEFAULT nextval('mam_server_user_id'), server varchar(250) NOT NULL, user_name varchar(250) NOT NULL ); @@ -244,8 +250,11 @@ CREATE TABLE mam_muc_message( CREATE INDEX i_mam_muc_message_sender_id ON mam_muc_message USING BTREE (sender_id); CREATE INDEX i_mam_muc_message_room_id_sender_id_origin_id ON mam_muc_message USING BTREE (room_id, sender_id, origin_id); + +CREATE SEQUENCE offline_message_id; + CREATE TABLE offline_message( - id SERIAL UNIQUE PRIMARY Key, + id INT UNIQUE PRIMARY Key DEFAULT nextval('offline_message_id'), timestamp BIGINT NOT NULL, expire BIGINT, server varchar(250) NOT NULL, @@ -264,18 +273,24 @@ CREATE TABLE auth_token( seq_no BIGINT NOT NULL ); + +CREATE SEQUENCE muc_light_rooms_id; + CREATE TABLE muc_light_rooms( - id BIGSERIAL NOT NULL UNIQUE, + id BIGINT NOT NULL UNIQUE 
DEFAULT nextval('muc_light_rooms_id'), luser VARCHAR(250) NOT NULL, lserver VARCHAR(250) NOT NULL, version VARCHAR(20) NOT NULL, PRIMARY KEY (lserver, luser) ); + +CREATE SEQUENCE muc_rooms_id; + CREATE TABLE muc_rooms( - id BIGSERIAL NOT NULL UNIQUE, + id BIGINT NOT NULL UNIQUE DEFAULT nextval('muc_rooms_id'), muc_host VARCHAR(250) NOT NULL, - room_name VARCHAR(250) NOT NULL, + room_name VARCHAR(250) NOT NULL, options JSON NOT NULL, PRIMARY KEY (muc_host, room_name) ); @@ -341,8 +356,11 @@ CREATE INDEX i_inbox_timestamp ON inbox USING BTREE(lserver, luser, timestamp); CREATE INDEX i_inbox_us_box ON inbox USING BTREE(lserver, luser, box); CREATE INDEX i_inbox_box ON inbox (box) WHERE (box = 'bin'); + +CREATE SEQUENCE pubsub_nodes_nidx; + CREATE TABLE pubsub_nodes ( - nidx BIGSERIAL PRIMARY KEY, + nidx BIGINT PRIMARY KEY DEFAULT nextval('pubsub_nodes_nidx'), p_key VARCHAR(250) NOT NULL, name VARCHAR(250) NOT NULL, type VARCHAR(250) NOT NULL, @@ -464,22 +482,27 @@ CREATE TABLE domain_admins( PRIMARY KEY(domain) ); + +CREATE SEQUENCE domain_settings_id_sequence; + -- Mapping from domain hostname to host_type. -- Column id is used for ordering only. CREATE TABLE domain_settings ( - id BIGSERIAL NOT NULL UNIQUE, + id BIGINT NOT NULL UNIQUE DEFAULT nextval('domain_settings_id_sequence'), domain VARCHAR(250) NOT NULL, host_type VARCHAR(250) NOT NULL, status SMALLINT NOT NULL DEFAULT 1, PRIMARY KEY(domain) ); +CREATE SEQUENCE domain_events_id_sequence; + -- A new record is inserted into domain_events, each time -- domain_settings table is updated: i.e. when a domain is removed, -- inserted, enabled or disabled. -- Column id is used for ordering and not related to domain_settings.id. 
CREATE TABLE domain_events ( - id BIGSERIAL NOT NULL, + id BIGINT NOT NULL UNIQUE DEFAULT nextval('domain_events_id_sequence'), domain VARCHAR(250) NOT NULL, PRIMARY KEY(id) ); From 7b0245e57a5e7f6aa6ab30297064efb4e4c49607 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Tue, 1 Oct 2024 12:48:55 +0200 Subject: [PATCH 07/12] CockroachDB uses port 8080. Changing all 8080 ports used in tests to avoid conflicts. --- big_tests/tests/muc_SUITE.erl | 4 ++-- test/cowboy_SUITE.erl | 10 +++++----- test/http_client_SUITE.erl | 8 ++++---- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/big_tests/tests/muc_SUITE.erl b/big_tests/tests/muc_SUITE.erl index 6cac33933af..3d7529d7d27 100644 --- a/big_tests/tests/muc_SUITE.erl +++ b/big_tests/tests/muc_SUITE.erl @@ -386,13 +386,13 @@ init_per_group(disco_rsm_with_offline, Config) -> init_per_group(G, Config) when G =:= http_auth_no_server; G =:= http_auth -> PoolOpts = #{strategy => available_worker, workers => 5}, - ConnOpts = #{host => "http://localhost:8080", path_prefix => <<"/muc/auth/">>, + ConnOpts = #{host => "http://localhost:8081", path_prefix => <<"/muc/auth/">>, request_timeout => 2000}, Pool = config([outgoing_pools, http, muc_http_auth_test], #{opts => PoolOpts, conn_opts => ConnOpts}), [{ok, _Pid}] = rpc(mim(), mongoose_wpool, start_configured_pools, [[Pool]]), case G of - http_auth -> http_helper:start(8080, "/muc/auth/check_password", fun handle_http_auth/1); + http_auth -> http_helper:start(8081, "/muc/auth/check_password", fun handle_http_auth/1); _ -> ok end, ConfigWithModules = dynamic_modules:save_modules(host_type(), Config), diff --git a/test/cowboy_SUITE.erl b/test/cowboy_SUITE.erl index ebf5560b579..bfe1d39e7b9 100644 --- a/test/cowboy_SUITE.erl +++ b/test/cowboy_SUITE.erl @@ -20,7 +20,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --define(SERVER, "http://localhost:8080"). +-define(SERVER, "http://localhost:8081"). 
-import(ejabberd_helper, [use_config_file/2, start_ejabberd_with_config/2]). @@ -128,7 +128,7 @@ ws_request_bad_protocol(_Config) -> ws_requests_xmpp(_Config) -> %% Given Host = "localhost", - Port = 8080, + Port = 8081, Protocol = <<"xmpp">>, BinaryPing = ws_tx_frame(<<"ping">>, 2), BinaryPong = ws_rx_frame(<<"pong">>, 2), @@ -151,7 +151,7 @@ ws_requests_xmpp(_Config) -> ws_requests_other(_Config) -> %% Given Host = "localhost", - Port = 8080, + Port = 8081, Protocol = <<"other">>, TextPing = ws_tx_frame(<<"ping">>, 1), TextPong = ws_rx_frame(<<"pong">>, 1), @@ -181,7 +181,7 @@ mixed_requests(_Config) -> TextPong = ws_rx_frame(<<"pong">>, 1), Host = "localhost", - Port = 8080, + Port = 8081, HTTPHost = ?SERVER, Path = <<"/">>, @@ -245,7 +245,7 @@ start_cowboy() -> }]), {ok, _Pid} = cowboy:start_clear(http_listener, #{num_acceptors => 20, - socket_opts => [{port, 8080}]}, + socket_opts => [{port, 8081}]}, #{env => #{dispatch => Dispatch}}). stop_cowboy() -> diff --git a/test/http_client_SUITE.erl b/test/http_client_SUITE.erl index 0fd7b5dd565..0f44692bd3b 100644 --- a/test/http_client_SUITE.erl +++ b/test/http_client_SUITE.erl @@ -30,7 +30,7 @@ all() -> ]. init_per_suite(Config) -> - http_helper:start(8080, '_', fun process_request/1), + http_helper:start(8081, '_', fun process_request/1), Pid = self(), spawn(fun() -> register(test_helper, self()), @@ -61,12 +61,12 @@ init_per_testcase(TC, Config) -> Config. pool_opts(request_timeout_test) -> - #{conn_opts => #{host => "http://localhost:8080", request_timeout => 10}}; + #{conn_opts => #{host => "http://localhost:8081", request_timeout => 10}}; pool_opts(pool_timeout_test) -> #{opts => #{workers => 1, max_overflow => 0, strategy => available_worker, call_timeout => 10}, - conn_opts => #{host => "http://localhost:8080", request_timeout => 5000}}; + conn_opts => #{host => "http://localhost:8081", request_timeout => 5000}}; pool_opts(_TC) -> - #{conn_opts => #{host => "http://localhost:8080", request_timeout => 1000}}. 
+ #{conn_opts => #{host => "http://localhost:8081", request_timeout => 1000}}. end_per_testcase(_TC, _Config) -> mongoose_wpool:stop(http, global, pool()). From ca0f7acf38d3c7a3077b5b0b9171867e5676b900 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Tue, 1 Oct 2024 12:50:31 +0200 Subject: [PATCH 08/12] Fixing mod_vcard_rdbms, because there is column named "family" which is registered keyword in cockroachDB --- src/rdbms/rdbms_queries.erl | 43 +++++++++++++++++++++++++++++++---- src/vcard/mod_vcard_rdbms.erl | 23 +++++++++++++++---- 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/src/rdbms/rdbms_queries.erl b/src/rdbms/rdbms_queries.erl index 7affbb86b9a..e2b975e895a 100644 --- a/src/rdbms/rdbms_queries.erl +++ b/src/rdbms/rdbms_queries.erl @@ -172,10 +172,16 @@ prepare_upsert(HostType, Name, Table, InsertFields, Updates, UniqueKeyFields) -> IncrementalField :: none | binary()) -> {ok, QueryName :: mongoose_rdbms:query_name()} | {error, already_exists}. prepare_upsert(HostType, Name, Table, InsertFields, Updates, UniqueKeyFields, IncrementalField) -> - SQL = upsert_query(HostType, Table, InsertFields, Updates, UniqueKeyFields, IncrementalField), + InsertFieldsTransformed = transform_fields(HostType, InsertFields), + UpdatesTransformed = transform_fields(HostType, Updates), + UniqueKeyFieldsTransformed = transform_fields(HostType, UniqueKeyFields), + IncrementalFieldTransformed = transform_fields(HostType, IncrementalField), + SQL = upsert_query(HostType, Table, InsertFieldsTransformed, UpdatesTransformed, + UniqueKeyFieldsTransformed, IncrementalFieldTransformed), Query = iolist_to_binary(SQL), ?LOG_DEBUG(#{what => rdbms_upsert_query, name => Name, query => Query}), - Fields = prepared_upsert_fields(InsertFields, Updates, UniqueKeyFields), + Fields = prepared_upsert_fields(InsertFieldsTransformed, UpdatesTransformed, + UniqueKeyFieldsTransformed), mongoose_rdbms:prepare(Name, Table, Fields, Query). 
prepared_upsert_fields(InsertFields, Updates, UniqueKeyFields) -> @@ -206,10 +212,15 @@ prepared_upsert_many_fields(RecordCount, InsertFields, Updates, _UniqueKeyFields UniqueKeyFields :: [binary()]) -> {ok, QueryName :: mongoose_rdbms:query_name()} | {error, already_exists}. prepare_upsert_many(HostType, RecordCount, Name, Table, InsertFields, Updates, UniqueKeyFields) -> - SQL = upsert_query_many(HostType, RecordCount, Table, InsertFields, Updates, UniqueKeyFields), + InsertFieldsTransformed = transform_fields(HostType, InsertFields), + UpdatesTransformed = transform_fields(HostType, Updates), + UniqueKeyFieldsTransformed = transform_fields(HostType, UniqueKeyFields), + SQL = upsert_query_many(HostType, RecordCount, Table, InsertFieldsTransformed, + UpdatesTransformed, UniqueKeyFieldsTransformed), Query = iolist_to_binary(SQL), ?LOG_DEBUG(#{what => rdbms_upsert_query, name => Name, query => Query}), - Fields = prepared_upsert_many_fields(RecordCount, InsertFields, Updates, UniqueKeyFields), + Fields = prepared_upsert_many_fields(RecordCount, InsertFieldsTransformed, + UpdatesTransformed, UniqueKeyFieldsTransformed), mongoose_rdbms:prepare(Name, Table, Fields, Query). upsert_query(HostType, Table, InsertFields, Updates, UniqueKeyFields, IncrementalField) -> @@ -457,3 +468,27 @@ limit_offset_args(Limit, Offset) -> limit_offset_args(mssql, Limit, Offset) -> [Offset, Limit]; limit_offset_args(_, Limit, Offset) -> [Limit, Offset]. + +transform_fields(_, none) -> + none; +transform_fields(HostType, Fields) when is_list(Fields) -> + case mongoose_rdbms:db_engine(HostType) of + pgsql -> + lists:map(fun(Element) -> transform_field(Element) end, Fields); + _ -> + Fields + end; +transform_fields(HostType, Field) when is_binary(Field) -> + case mongoose_rdbms:db_engine(HostType) of + pgsql -> + transform_field(Field); + _ -> + Field + end. 
+ +transform_field({_, Field, _} = Element) -> + erlang:setelement(2, Element, transform_field(Field)); +transform_field(Field) when is_binary(Field)-> + <<"\"", Field/binary, "\"">>; +transform_field(Element) -> + Element. diff --git a/src/vcard/mod_vcard_rdbms.erl b/src/vcard/mod_vcard_rdbms.erl index 2b10e3e0d78..edfafeddd19 100644 --- a/src/vcard/mod_vcard_rdbms.erl +++ b/src/vcard/mod_vcard_rdbms.erl @@ -178,11 +178,12 @@ search(HostType, LServer, Data) -> _ -> Limit = mod_vcard:get_results_limit(HostType), LimitType = limit_type(Limit), + DBEngine = mongoose_rdbms:db_engine(HostType), StmtName = filters_to_statement_name(Filters, LimitType), case mongoose_rdbms:prepared(StmtName) of false -> %% Create a new type of a query - SQL = search_sql_binary(Filters, LimitType), + SQL = search_sql_binary(DBEngine, Filters, LimitType), Columns = filters_to_columns(Filters, LimitType), mongoose_rdbms:prepare(StmtName, vcard_search, Columns, SQL); true -> @@ -194,12 +195,12 @@ search(HostType, LServer, Data) -> record_to_items(Rs); Error -> ?LOG_ERROR(#{what => vcard_db_search_failed, statement => StmtName, - sql_query => search_sql_binary(Filters, LimitType), + sql_query => search_sql_binary(DBEngine, Filters, LimitType), reason => Error, host => LServer}), [] catch Class:Error:Stacktrace -> ?LOG_ERROR(#{what => vcard_db_search_failed, statement => StmtName, - sql_query => search_sql_binary(Filters, LimitType), + sql_query => search_sql_binary(DBEngine, Filters, LimitType), class => Class, stacktrace => Stacktrace, reason => Error, host => LServer}), [] @@ -239,7 +240,9 @@ filters_to_args(Filters, LimitType, Limit) -> limit -> Args ++ [Limit] end. -search_sql_binary(Filters, LimitType) -> +search_sql_binary(pgsql, Filters, LimitType) -> + iolist_to_binary(search_sql_pgsql(Filters, LimitType)); +search_sql_binary(_, Filters, LimitType) -> iolist_to_binary(search_sql(Filters, LimitType)). 
search_sql(Filters, LimitType) -> @@ -252,6 +255,16 @@ search_sql(Filters, LimitType) -> "FROM vcard_search ">>, RestrictionSQL, LimitSQL]. +search_sql_pgsql(Filters, LimitType) -> + {TopSQL, LimitSQL} = limit_type_to_sql(LimitType), + RestrictionSQL = filters_to_sql(Filters), + [<<"SELECT ">>, TopSQL, + <<" \"username\", \"server\", \"fn\", \"family\", \"given\", \"middle\", " + "\"nickname\", \"bday\", \"ctry\", \"locality\", " + "\"email\", \"orgname\", \"orgunit\" " + "FROM vcard_search ">>, + RestrictionSQL, LimitSQL]. + -spec limit_type_to_sql(limit_type()) -> {binary(), binary()}. limit_type_to_sql(infinity) -> {<<>>, <<>>}; @@ -348,7 +361,7 @@ column_to_id(<<"lorgunit">>) -> "U". search_columns() -> [<<"username">>, <<"fn">>, <<"lfn">>, - <<"\"family\"">>, <<"lfamily">>, + <<"family">>, <<"lfamily">>, <<"given">>, <<"lgiven">>, <<"middle">>, <<"lmiddle">>, <<"nickname">>, <<"lnickname">>, From f37c17907fc681c61ba886ee79e79d822b75ab71 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Mon, 7 Oct 2024 13:18:09 +0200 Subject: [PATCH 09/12] Adding new rdbms backend, cockroachDB --- big_tests/test.config | 2 +- big_tests/tests/rdbms_SUITE.erl | 5 +- src/auth/ejabberd_auth_rdbms.erl | 4 +- src/config/mongoose_config_spec.erl | 2 +- src/pubsub/mod_pubsub_db_rdbms.erl | 4 +- src/rdbms/mongoose_rdbms.erl | 8 +-- src/rdbms/mongoose_rdbms_cockroachdb.erl | 69 ++++++++++++++++++++++++ src/rdbms/mongoose_rdbms_timestamp.erl | 2 +- src/rdbms/rdbms_queries.erl | 14 ++--- src/vcard/mod_vcard_rdbms.erl | 6 +-- src/wpool/mongoose_wpool_rdbms.erl | 5 +- tools/test-runner.sh | 1 + 12 files changed, 99 insertions(+), 23 deletions(-) create mode 100644 src/rdbms/mongoose_rdbms_cockroachdb.erl diff --git a/big_tests/test.config b/big_tests/test.config index 2b0213de6bb..84137c946da 100644 --- a/big_tests/test.config +++ b/big_tests/test.config @@ -287,7 +287,7 @@ [outgoing_pools.rdbms.default] scope = \"global\" workers = 5 - connection.driver = \"pgsql\" + connection.driver = 
\"cockroachdb\" connection.host = \"localhost\" connection.port = 26257 connection.database = \"mongooseim\" diff --git a/big_tests/tests/rdbms_SUITE.erl b/big_tests/tests/rdbms_SUITE.erl index 01e81334f3c..512607f3903 100644 --- a/big_tests/tests/rdbms_SUITE.erl +++ b/big_tests/tests/rdbms_SUITE.erl @@ -226,7 +226,7 @@ binary_values() -> binary:copy(<<0>>, 100000), null ] ++ - case is_odbc() orelse is_pgsql() of + case is_odbc() orelse is_pgsql() orelse is_cockroachdb() of true -> []; false -> @@ -1277,6 +1277,9 @@ is_pgsql() -> is_mysql() -> db_engine() == mysql. +is_cockroachdb() -> + db_engine() == cockroachdb. + stop_global_default_pool() -> Pools = rpc(mim(), mongoose_config, get_opt, [outgoing_pools]), [GlobalRdbmsPool] = [Pool || Pool = #{type := rdbms, scope := global, tag := default} <- Pools], diff --git a/src/auth/ejabberd_auth_rdbms.erl b/src/auth/ejabberd_auth_rdbms.erl index a863676ed7c..dcc3682e9c0 100644 --- a/src/auth/ejabberd_auth_rdbms.erl +++ b/src/auth/ejabberd_auth_rdbms.erl @@ -447,7 +447,7 @@ prepare_count_users(HostType) -> prepare(auth_count_users_estimate, 'information_schema.tables', [], <<"SELECT table_rows FROM information_schema.tables " "WHERE table_name = 'users'">>); - {true, pgsql} -> + {true, Driver} when Driver =:= pgsql; Driver =:= cockroachdb -> prepare_count_users(), prepare(auth_count_users_estimate, pg_class, [], <<"SELECT reltuples::numeric FROM pg_class " @@ -516,7 +516,7 @@ execute_count_users(HostType, LServer, #{}) -> mongoose_rdbms:db_engine(LServer)} of {true, mysql} -> execute_successfully(HostType, auth_count_users_estimate, []); - {true, pgsql} -> + {true, Driver} when Driver =:= pgsql; Driver =:= cockroachdb -> case execute_successfully(HostType, auth_count_users_estimate, []) of {selected,[{<<"-1">>}]} -> execute_successfully(HostType, auth_count_users, [LServer]); diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 4728e741929..93630a49108 100644 --- 
a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -579,7 +579,7 @@ outgoing_pool_connection(<<"rabbit">>) -> outgoing_pool_connection(<<"rdbms">>) -> #section{ items = #{<<"driver">> => #option{type = atom, - validate = {enum, [odbc, pgsql, mysql]}}, + validate = {enum, [odbc, pgsql, mysql, cockroachdb]}}, <<"keepalive_interval">> => #option{type = integer, validate = positive}, <<"query_timeout">> => #option{type = integer, diff --git a/src/pubsub/mod_pubsub_db_rdbms.erl b/src/pubsub/mod_pubsub_db_rdbms.erl index 54e3f649e5f..1fcf315f164 100644 --- a/src/pubsub/mod_pubsub_db_rdbms.erl +++ b/src/pubsub/mod_pubsub_db_rdbms.erl @@ -258,7 +258,7 @@ prepare_select_nodes_by_owner() -> {mysql, _} -> mongoose_rdbms:prepare(pubsub_select_nodes_by_owner, pubsub_nodes, [owners], <<"SELECT name, type FROM pubsub_nodes WHERE owners = convert(?, JSON);">>); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> mongoose_rdbms:prepare(pubsub_select_nodes_by_owner, pubsub_nodes, [owners], <<"SELECT name, type FROM pubsub_nodes WHERE owners ::json->>0 like ? " "AND JSON_ARRAY_LENGTH(owners) = 1">>); @@ -522,7 +522,7 @@ execute_get_user_items(LU, LS) -> -spec execute_select_nodes_by_owner(LJID :: binary()) -> mongoose_rdbms:query_result(). execute_select_nodes_by_owner(LJID) -> case mongoose_rdbms:db_engine(global) of - pgsql -> + Driver when Driver =:= pgsql; Driver =:= cockroachdb -> mongoose_rdbms:execute_successfully(global, pubsub_select_nodes_by_owner, [LJID]); _ -> diff --git a/src/rdbms/mongoose_rdbms.erl b/src/rdbms/mongoose_rdbms.erl index 11819ece7c7..8f7af7c93da 100644 --- a/src/rdbms/mongoose_rdbms.erl +++ b/src/rdbms/mongoose_rdbms.erl @@ -170,7 +170,7 @@ -type dirty_result() :: {ok, any()} | {error, any()}. -export_type([query_name/0, query_result/0, transaction_result/0]). --type backend() :: pgsql | mysql | odbc. +-type backend() :: pgsql | mysql | odbc | cockroachdb. 
-type options() :: #{driver := backend(), max_start_interval := pos_integer(), query_timeout := pos_integer(), @@ -193,13 +193,15 @@ process_options(Opts) -> process_tls_options(Opts = #{driver := mysql, tls := #{required := _}}) -> error(#{what => invalid_rdbms_tls_options, options => Opts, text => <<"The 'required' option is not supported for MySQL">>}); -process_tls_options(Opts = #{driver := pgsql, tls := TLSOpts}) -> +process_tls_options(Opts = #{driver := Driver, tls := TLSOpts}) when Driver =:= pgsql; + Driver =:= cockroachdb -> Opts#{tls := maps:merge(#{required => false}, TLSOpts)}; process_tls_options(Opts) -> Opts. ensure_db_port(Opts = #{port := _}) -> Opts; ensure_db_port(Opts = #{driver := pgsql}) -> Opts#{port => 5432}; +ensure_db_port(Opts = #{driver := cockroachdb}) -> Opts#{port => 26257}; ensure_db_port(Opts = #{driver := mysql}) -> Opts#{port => 3306}. -spec prepare( @@ -913,7 +915,7 @@ abort_on_driver_error({error, "Failed sending data on socket" ++ _}) -> %% mysql abort_on_driver_error(_) -> continue. --spec db_engine(mongooseim:host_type_or_global()) -> odbc | mysql | pgsql | undefined. +-spec db_engine(mongooseim:host_type_or_global()) -> odbc | mysql | pgsql | cockroachdb | undefined. db_engine(_HostType) -> try mongoose_backend:get_backend_name(global, ?MODULE) catch error:badarg -> undefined end. diff --git a/src/rdbms/mongoose_rdbms_cockroachdb.erl b/src/rdbms/mongoose_rdbms_cockroachdb.erl new file mode 100644 index 00000000000..b113e3b084c --- /dev/null +++ b/src/rdbms/mongoose_rdbms_cockroachdb.erl @@ -0,0 +1,69 @@ +%%============================================================================== +%% Copyright 2016 Erlang Solutions Ltd. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%============================================================================== + +-module(mongoose_rdbms_cockroachdb). +-author('janusz.jakubiec@erlang-solutions.com'). +-behaviour(mongoose_rdbms_backend). + +-type options() :: #{host := string(), + port := inet:port_number(), + database := string(), + username := string(), + password := string(), + atom() => any()}. + +-export([escape_binary/1, unescape_binary/1, escape_string/1, connect/2, + disconnect/1, query/3, prepare/5, execute/4]). + +%% API + +-spec escape_string(iolist()) -> iodata(). +escape_string(Iolist) -> + Bin = iolist_to_binary(Iolist), + [$', binary:replace(Bin, <<"'">>, <<"''">>, [global]), $']. + +-spec escape_binary(binary()) -> iodata(). +escape_binary(Bin) -> + mongoose_rdbms_pgsql:escape_binary(Bin). + +-spec unescape_binary(binary()) -> binary(). +unescape_binary(Bin) -> + mongoose_rdbms_pgsql:unescape_binary(Bin). + +-spec connect(options(), QueryTimeout :: non_neg_integer()) -> + {ok, Connection :: term()} | {error, Reason :: any()}. +connect(Options, QueryTimeout) -> + mongoose_rdbms_pgsql:connect(Options, QueryTimeout). + +-spec disconnect(Connection :: epgsql:connection()) -> ok. +disconnect(Connection) -> + mongoose_rdbms_pgsql:disconnect(Connection). + +-spec query(Connection :: term(), Query :: any(), + Timeout :: infinity | non_neg_integer()) -> mongoose_rdbms:query_result(). +query(Connection, Query, Timeout) -> + mongoose_rdbms_pgsql:query(Connection, Query, Timeout). 
+ +-spec prepare(Connection :: term(), Name :: atom(), Table :: binary(), + Fields :: [binary()], Statement :: iodata()) -> + {ok, term()} | {error, any()}. +prepare(Connection, Name, Table, Fields, Statement) -> + mongoose_rdbms_pgsql:prepare(Connection, Name, Table, Fields, Statement). + +-spec execute(Connection :: term(), StatementRef :: term(), Params :: [term()], + Timeout :: infinity | non_neg_integer()) -> mongoose_rdbms:query_result(). +execute(Connection, StatementRef, Params, Timeout) -> + mongoose_rdbms_pgsql:execute(Connection, StatementRef, Params, Timeout). diff --git a/src/rdbms/mongoose_rdbms_timestamp.erl b/src/rdbms/mongoose_rdbms_timestamp.erl index 48da86826cf..b4ee1ac0bd9 100644 --- a/src/rdbms/mongoose_rdbms_timestamp.erl +++ b/src/rdbms/mongoose_rdbms_timestamp.erl @@ -13,7 +13,7 @@ select_query() -> case {mongoose_rdbms:db_engine(global), mongoose_rdbms:db_type()} of {mysql, _} -> <<"SELECT UNIX_TIMESTAMP()">>; - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> <<"SELECT CAST(extract(epoch from now()) AS integer)">>; {odbc, mssql} -> <<"SELECT DATEDIFF_BIG(second, '1970-01-01 00:00:00', GETUTCDATE())">>; diff --git a/src/rdbms/rdbms_queries.erl b/src/rdbms/rdbms_queries.erl index e2b975e895a..5f647f80fcb 100644 --- a/src/rdbms/rdbms_queries.erl +++ b/src/rdbms/rdbms_queries.erl @@ -104,7 +104,7 @@ execute_upsert(HostType, PoolTag, Name, InsertParams, UpdateParams, UniqueKeyVal case {mongoose_rdbms:db_engine(HostType), mongoose_rdbms:db_type()} of {mysql, _} -> mongoose_rdbms:execute(HostType, PoolTag, Name, InsertParams ++ UpdateParams); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> mongoose_rdbms:execute(HostType, PoolTag, Name, InsertParams ++ UpdateParams); {odbc, mssql} -> mongoose_rdbms:execute(HostType, PoolTag, Name, UniqueKeyValues ++ InsertParams ++ UpdateParams); @@ -127,7 +127,7 @@ execute_upsert_many(HostType, PoolTag, Name, InsertParams, UpdateParams) -> case 
{mongoose_rdbms:db_engine(HostType), mongoose_rdbms:db_type()} of {mysql, _} -> mongoose_rdbms:execute(HostType, PoolTag, Name, InsertParams); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> mongoose_rdbms:execute(HostType, PoolTag, Name, InsertParams ++ UpdateParams); {odbc, mssql} -> mongoose_rdbms:execute(HostType, PoolTag, Name, InsertParams); @@ -143,7 +143,7 @@ request_upsert(HostType, Name, InsertParams, UpdateParams, UniqueKeyValues) -> case {mongoose_rdbms:db_engine(HostType), mongoose_rdbms:db_type()} of {mysql, _} -> mongoose_rdbms:execute_request(HostType, Name, InsertParams ++ UpdateParams); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> mongoose_rdbms:execute_request(HostType, Name, InsertParams ++ UpdateParams); {odbc, mssql} -> mongoose_rdbms:execute_request(HostType, Name, UniqueKeyValues ++ InsertParams ++ UpdateParams); @@ -227,7 +227,7 @@ upsert_query(HostType, Table, InsertFields, Updates, UniqueKeyFields, Incrementa case {mongoose_rdbms:db_engine(HostType), mongoose_rdbms:db_type()} of {mysql, _} -> upsert_mysql_query(Table, InsertFields, Updates, UniqueKeyFields, IncrementalField); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> upsert_pgsql_query(Table, InsertFields, Updates, UniqueKeyFields, IncrementalField); {odbc, mssql} -> upsert_mssql_query(Table, InsertFields, Updates, UniqueKeyFields); @@ -238,7 +238,7 @@ upsert_query_many(HostType, RecordCount, Table, InsertFields, Updates, UniqueKey case {mongoose_rdbms:db_engine(HostType), mongoose_rdbms:db_type()} of {mysql, _} -> upsert_many_mysql_query(RecordCount, Table, InsertFields); - {pgsql, _} -> + {Driver, _} when Driver =:= pgsql; Driver =:= cockroachdb -> upsert_many_pgsql_query(RecordCount, Table, InsertFields, Updates, UniqueKeyFields); {odbc, mssql} -> upsert_many_mssql_query(RecordCount, Table, InsertFields, Updates, UniqueKeyFields); @@ -473,14 +473,14 @@ transform_fields(_, none) -> 
none; transform_fields(HostType, Fields) when is_list(Fields) -> case mongoose_rdbms:db_engine(HostType) of - pgsql -> + cockroachdb -> lists:map(fun(Element) -> transform_field(Element) end, Fields); _ -> Fields end; transform_fields(HostType, Field) when is_binary(Field) -> case mongoose_rdbms:db_engine(HostType) of - pgsql -> + cockroachdb -> transform_field(Field); _ -> Field diff --git a/src/vcard/mod_vcard_rdbms.erl b/src/vcard/mod_vcard_rdbms.erl index edfafeddd19..fe219134273 100644 --- a/src/vcard/mod_vcard_rdbms.erl +++ b/src/vcard/mod_vcard_rdbms.erl @@ -240,8 +240,8 @@ filters_to_args(Filters, LimitType, Limit) -> limit -> Args ++ [Limit] end. -search_sql_binary(pgsql, Filters, LimitType) -> - iolist_to_binary(search_sql_pgsql(Filters, LimitType)); +search_sql_binary(cockroachdb, Filters, LimitType) -> + iolist_to_binary(search_sql_cockroachdb(Filters, LimitType)); search_sql_binary(_, Filters, LimitType) -> iolist_to_binary(search_sql(Filters, LimitType)). @@ -255,7 +255,7 @@ search_sql(Filters, LimitType) -> "FROM vcard_search ">>, RestrictionSQL, LimitSQL]. -search_sql_pgsql(Filters, LimitType) -> +search_sql_cockroachdb(Filters, LimitType) -> {TopSQL, LimitSQL} = limit_type_to_sql(LimitType), RestrictionSQL = filters_to_sql(Filters), [<<"SELECT ">>, TopSQL, diff --git a/src/wpool/mongoose_wpool_rdbms.erl b/src/wpool/mongoose_wpool_rdbms.erl index 67ba74536df..b6e7d8f98a1 100644 --- a/src/wpool/mongoose_wpool_rdbms.erl +++ b/src/wpool/mongoose_wpool_rdbms.erl @@ -70,7 +70,8 @@ get_rdbms_data_stats(HostType, Tag) -> Stats#{workers => length(RDBMSConnections)}. 
get_port_from_rdbms_connection({{ok, DB, Pid}, _WorkerPid}) when DB =:= mysql; - DB =:= pgsql -> + DB =:= pgsql; + DB =:= cockroachdb -> ProcState = sys:get_state(Pid), get_port_from_proc_state(DB, ProcState); get_port_from_rdbms_connection({{ok, odbc, Pid}, WorkerPid}) -> @@ -90,7 +91,7 @@ get_port_from_proc_state(mysql, State) -> %% stmts = dict:new(), query_cache = empty, cap_found_rows = false}). SockInfo = element(4, State), get_port_from_sock(SockInfo); -get_port_from_proc_state(pgsql, State) -> +get_port_from_proc_state(Driver, State) when Driver =:= pgsql; Driver =:= cockroachdb -> %% -record(state, {mod, %% sock, %% data = <<>>, diff --git a/tools/test-runner.sh b/tools/test-runner.sh index ae3aed2a0f1..1828d829e67 100755 --- a/tools/test-runner.sh +++ b/tools/test-runner.sh @@ -266,6 +266,7 @@ PRESETS_ARRAY=( SRC_TESTSPEC="default.spec" DBS_ARRAY=( + cockroachdb mysql pgsql cassandra From b69fa3f7511b62b489aa2f0eb0f4dc0021314d84 Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Mon, 7 Oct 2024 15:37:17 +0200 Subject: [PATCH 10/12] Adding cockroachDB config parser tests --- src/config/mongoose_config_spec.erl | 2 +- test/config_parser_SUITE.erl | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/config/mongoose_config_spec.erl b/src/config/mongoose_config_spec.erl index 93630a49108..943a3635d5e 100644 --- a/src/config/mongoose_config_spec.erl +++ b/src/config/mongoose_config_spec.erl @@ -590,7 +590,7 @@ outgoing_pool_connection(<<"rdbms">>) -> % odbc <<"settings">> => #option{type = string}, - % mysql, pgsql + % mysql, pgsql, cockroachdb <<"host">> => #option{type = string, validate = non_empty}, <<"database">> => #option{type = string, diff --git a/test/config_parser_SUITE.erl b/test/config_parser_SUITE.erl index 8028c31a586..77f1171ae19 100644 --- a/test/config_parser_SUITE.erl +++ b/test/config_parser_SUITE.erl @@ -128,8 +128,10 @@ groups() -> pool_rdbms, pool_rdbms_connection_odbc, pool_rdbms_connection_pgsql, 
+ pool_rdbms_connection_cockroachdb, pool_rdbms_connection_mysql, pool_rdbms_connection_tls_pgsql, + pool_rdbms_connection_tls_cockroachdb, pool_rdbms_connection_tls_mysql, pool_http, pool_http_connection, @@ -936,6 +938,24 @@ pool_rdbms_connection_tls_pgsql(_Config) -> ?err(T(M#{<<"required">> => <<"maybe">>})), test_just_tls_client(P, T). +pool_rdbms_connection_cockroachdb(_Config) -> + P = [outgoing_pools, 1, conn_opts], + T = fun(Opts) -> pool_conn_raw(<<"rdbms">>, Opts) end, + Required = raw_sql_opts(cockroachdb), + test_pool_rdbms_connection_common_opts(P, T, Required), + test_pool_rdbms_connection_sql_opts(P, T, Required, sql_opts(cockroachdb, 26257)). + +pool_rdbms_connection_tls_cockroachdb(_Config) -> + P = [outgoing_pools, 1, conn_opts, tls], + Required = raw_sql_opts(cockroachdb), + T = fun(Opts) -> pool_conn_raw(<<"rdbms">>, Required#{<<"tls">> => Opts}) end, + M = tls_ca_raw(), + ?cfg(P, config([outgoing_pools, rdbms, default, conn_opts, tls], (tls_ca())#{required => false}), + T(M)), + ?cfg(P ++ [required], true, T(M#{<<"required">> => true})), + ?err(T(M#{<<"required">> => <<"maybe">>})), + test_just_tls_client(P, T). + pool_rdbms_connection_mysql(_Config) -> P = [outgoing_pools, 1, conn_opts], T = fun(Opts) -> pool_conn_raw(<<"rdbms">>, Opts) end, From 28711100075d13462edd7944277b055ba84b034b Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Mon, 7 Oct 2024 15:38:56 +0200 Subject: [PATCH 11/12] Modifying documentation to add information about cockroachDB. 
--- .../database-backends-configuration.md | 31 ++++++++++++++++++- doc/configuration/outgoing-connections.md | 16 +++++----- doc/migrations/6.2.1_x.x.x.md | 21 ++++++++++++- doc/modules/mod_mam.md | 2 +- src/vcard/mod_vcard_rdbms.erl | 22 ++++++------- tools/setup-db.sh | 1 + 6 files changed, 71 insertions(+), 22 deletions(-) diff --git a/doc/configuration/database-backends-configuration.md b/doc/configuration/database-backends-configuration.md index f3d9e7869c9..4046fb16b34 100644 --- a/doc/configuration/database-backends-configuration.md +++ b/doc/configuration/database-backends-configuration.md @@ -43,7 +43,7 @@ Persistent Data: * RDBMS - MongooseIM has a strong backend support for relational databases. Reliable and battle proven, they are a great choice for regular MongooseIM use cases and features like `privacy lists`, `vcards`, `roster`, `private storage`, `last activity` and `message archive`. Never loose your data. - Use MySQL, MariaDB, PostgreSQL, or MS SQL Server. + Use MySQL, MariaDB, PostgreSQL, CockroachDB, or MS SQL Server. * Cassandra - Only for MAM (Message Archive Management). @@ -125,6 +125,35 @@ Please refer to the [RDBMS options](outgoing-connections.md#rdbms-options) and [general database options](general.md#database-settings) for more information. +### CockroachDB + +**Can be used for:** + +* users (credentials) +* vcards +* roster +* private storage +* privacy/block lists +* last activity +* mam (message archive management) +* muc_light rooms + +**Setup** + +The schema files can be found in the `priv` directory. +The default schema is defined in the `cockroachdb.sql` file. + +You can use the following command to apply it on localhost: + +```bash +psql -h localhost -U user -p 26257 -c "CREATE DATABASE mongooseim;" +psql -h localhost -U user -p 26257 -q -d mongooseim -f cockroachdb.sql +``` +You should also configure the CockroachDB database in the `mongooseim.toml` file. 
+Please refer to the [RDBMS options](outgoing-connections.md#rdbms-options) +and [general database options](general.md#database-settings) +for more information. + ### Microsoft SQL Server Microsoft SQL Server, sometimes called MSSQL, or Azure SQL Database. diff --git a/doc/configuration/outgoing-connections.md b/doc/configuration/outgoing-connections.md index 9c3eeeccb2b..808f06050dd 100644 --- a/doc/configuration/outgoing-connections.md +++ b/doc/configuration/outgoing-connections.md @@ -74,7 +74,7 @@ For example: ### RDBMS options #### `outgoing_pools.rdbms.*.connection.driver` -* **Syntax:** string, one of `"pgsql"`, `"mysql"` or `"odbc"` (a supported driver) +* **Syntax:** string, one of `"pgsql"`, `"mysql"`, `"cockroachdb"` or `"odbc"` (a supported driver) * **Default:** none - this option is mandatory * **Example:** `driver = "psgql"` @@ -101,34 +101,34 @@ How long MongooseIM will wait for the database to answer for a query. When MongooseIM fails to connect to the DB, it retries with an exponential backoff. This option limits the backoff time for faster reconnection when the DB becomes reachable again. 
-### Options for `pgsql` and `mysql` +### Options for `pgsql`, `cockroachdb` and `mysql` #### `outgoing_pools.rdbms.*.connection.host` * **Syntax:** string -* **Default:** no default; required for `pgsql` and `mysql` +* **Default:** no default; required for `pgsql`, `cockroachdb` and `mysql` * **Example:** `host = "localhost"` #### `outgoing_pools.rdbms.*.connection.port` * **Syntax:** integer, between 0 and 65535 -* **Default:** `5432` for `pgsql`; `3306` for `mysql` +* **Default:** `5432` for `pgsql`; `26257` for `cockroachdb`; `3306` for `mysql` * **Example:** `port = 5343` #### `outgoing_pools.rdbms.*.connection.database` * **Syntax:** string -* **Default:** no default; required for `pgsql` and `mysql` +* **Default:** no default; required for `pgsql`, `cockroachdb` and `mysql` * **Example:** `database = "mim-db"` #### `outgoing_pools.rdbms.*.connection.username` * **Syntax:** string -* **Default:** no default; required for `pgsql` and `mysql` +* **Default:** no default; required for `pgsql`, `cockroachdb` and `mysql` * **Example:** `username = "mim-user"` #### `outgoing_pools.rdbms.*.connection.password` * **Syntax:** string -* **Default:** no default; required for `pgsql` and `mysql` +* **Default:** no default; required for `pgsql`, `cockroachdb` and `mysql` * **Example:** `password = "mim-password"` -To enable TLS, you need to include the [TLS section](#tls-options) in the connection options. There is one additonal option for PostgreSQL: +To enable TLS, you need to include the [TLS section](#tls-options) in the connection options. There is one additional option for PostgreSQL and CockroachDB: #### `outgoing_pools.rdbms.*.connection.tls.required` * **Syntax:** boolean diff --git a/doc/migrations/6.2.1_x.x.x.md b/doc/migrations/6.2.1_x.x.x.md index 0550ebb6e64..aa44fe36756 100644 --- a/doc/migrations/6.2.1_x.x.x.md +++ b/doc/migrations/6.2.1_x.x.x.md @@ -107,4 +107,23 @@ Stop the cluster, or individual nodes, if performing a rolling upgrade, and exec 3. 
Add Prometheus to the [instrumentation section](../configuration/instrumentation.md). 4. Configure a [listener](../listeners/listen-http.md#handler-types-prometheus-mongoose_prometheus_handler) for Prometheus. -Restart the node or cluster. \ No newline at end of file +Restart the node or cluster. + +## Support for the new RDBMS database - CockroachDB + +We've introduced support for a new RDBMS database, CockroachDB. To ensure compatibility and proper functionality, we've also added a new test preset: `cockroachdb_cets`. + +To configure CockroachDB, update the connection settings in your configuration file as follows: +```toml +[outgoing_pools.rdbms.default.connection] + driver = "cockroachdb" + host = "localhost" + port = 26257 + database = "mongooseim" + username = "mongooseim" + password = "mongooseim_secret" +``` + +Refer to the [Outgoing connections configuration doc](../configuration/outgoing-connections.md) for more information. + +The migration script for CockroachDB, `cockroachdb.sql`, is located in the [`priv/migrations`](https://github.com/esl/MongooseIM/tree/master/priv/migrations) directory. \ No newline at end of file diff --git a/doc/modules/mod_mam.md b/doc/modules/mod_mam.md index 53cadf4dd24..6bde433f643 100644 --- a/doc/modules/mod_mam.md +++ b/doc/modules/mod_mam.md @@ -9,7 +9,7 @@ MongooseIM is compatible with MAM 0.4-1.1.0. Configure MAM with different storage backends: -* RDBMS (databases like MySQL, PostgreSQL, MS SQL Server) +* RDBMS (databases like MySQL, PostgreSQL, CockroachDB, MS SQL Server) * Cassandra (NoSQL) * ElasticSearch (NoSQL) diff --git a/src/vcard/mod_vcard_rdbms.erl b/src/vcard/mod_vcard_rdbms.erl index fe219134273..c234c7ab777 100644 --- a/src/vcard/mod_vcard_rdbms.erl +++ b/src/vcard/mod_vcard_rdbms.erl @@ -360,17 +360,17 @@ column_to_id(<<"lorgunit">>) -> "U". 
search_columns() -> [<<"username">>, - <<"fn">>, <<"lfn">>, - <<"family">>, <<"lfamily">>, - <<"given">>, <<"lgiven">>, - <<"middle">>, <<"lmiddle">>, - <<"nickname">>, <<"lnickname">>, - <<"bday">>, <<"lbday">>, - <<"ctry">>, <<"lctry">>, - <<"locality">>, <<"llocality">>, - <<"email">>, <<"lemail">>, - <<"orgname">>, <<"lorgname">>, - <<"orgunit">>, <<"lorgunit">>]. + <<"fn">>, <<"lfn">>, + <<"family">>, <<"lfamily">>, + <<"given">>, <<"lgiven">>, + <<"middle">>, <<"lmiddle">>, + <<"nickname">>, <<"lnickname">>, + <<"bday">>, <<"lbday">>, + <<"ctry">>, <<"lctry">>, + <<"locality">>, <<"llocality">>, + <<"email">>, <<"lemail">>, + <<"orgname">>, <<"lorgname">>, + <<"orgunit">>, <<"lorgunit">>]. search_args(User, Search) -> [User, diff --git a/tools/setup-db.sh b/tools/setup-db.sh index e7cbfb4c87d..07fb0f96269 100755 --- a/tools/setup-db.sh +++ b/tools/setup-db.sh @@ -2,6 +2,7 @@ # Environment variable DB is used by this script. # If DB is undefined, than this script does nothing. + # Docker for Mac should be used on Mac (not docker-machine!) # https://store.docker.com/editions/community/docker-ce-desktop-mac From d05c632fedd6dc9bc1aa0b30b9b02de4edcb4d5c Mon Sep 17 00:00:00 2001 From: Janusz Jakubiec Date: Thu, 10 Oct 2024 14:50:39 +0200 Subject: [PATCH 12/12] Fixing CR comments --- doc/migrations/6.2.1_x.x.x.md | 2 +- src/rdbms/mongoose_rdbms.erl | 2 +- src/rdbms/mongoose_rdbms_cockroachdb.erl | 16 ---------------- src/rdbms/rdbms_queries.erl | 24 +++++++++++------------- 4 files changed, 13 insertions(+), 31 deletions(-) diff --git a/doc/migrations/6.2.1_x.x.x.md b/doc/migrations/6.2.1_x.x.x.md index aa44fe36756..08e5e797d15 100644 --- a/doc/migrations/6.2.1_x.x.x.md +++ b/doc/migrations/6.2.1_x.x.x.md @@ -111,7 +111,7 @@ Restart the node or cluster. ## Support for the new RDBMS database - CockroachDB -We've introduced support for a new RDBMS database, CockroachDB. 
To ensure compatibility and proper functionality, we've also added a new test preset: `cockroachdb_cets`. +We've introduced support for a new RDBMS database, CockroachDB. To configure CockroachDB, update the connection settings in your configuration file as follows: ```toml diff --git a/src/rdbms/mongoose_rdbms.erl b/src/rdbms/mongoose_rdbms.erl index 8f7af7c93da..2a7abde452a 100644 --- a/src/rdbms/mongoose_rdbms.erl +++ b/src/rdbms/mongoose_rdbms.erl @@ -915,7 +915,7 @@ abort_on_driver_error({error, "Failed sending data on socket" ++ _}) -> %% mysql abort_on_driver_error(_) -> continue. --spec db_engine(mongooseim:host_type_or_global()) -> odbc | mysql | pgsql | cockroachdb | undefined. +-spec db_engine(mongooseim:host_type_or_global()) -> backend() | undefined. db_engine(_HostType) -> try mongoose_backend:get_backend_name(global, ?MODULE) catch error:badarg -> undefined end. diff --git a/src/rdbms/mongoose_rdbms_cockroachdb.erl b/src/rdbms/mongoose_rdbms_cockroachdb.erl index b113e3b084c..2d77a5c3927 100644 --- a/src/rdbms/mongoose_rdbms_cockroachdb.erl +++ b/src/rdbms/mongoose_rdbms_cockroachdb.erl @@ -1,19 +1,3 @@ -%%============================================================================== -%% Copyright 2016 Erlang Solutions Ltd. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%============================================================================== - -module(mongoose_rdbms_cockroachdb). -author('janusz.jakubiec@erlang-solutions.com'). 
-behaviour(mongoose_rdbms_backend). diff --git a/src/rdbms/rdbms_queries.erl b/src/rdbms/rdbms_queries.erl index 5f647f80fcb..04089f262dc 100644 --- a/src/rdbms/rdbms_queries.erl +++ b/src/rdbms/rdbms_queries.erl @@ -172,10 +172,10 @@ prepare_upsert(HostType, Name, Table, InsertFields, Updates, UniqueKeyFields) -> IncrementalField :: none | binary()) -> {ok, QueryName :: mongoose_rdbms:query_name()} | {error, already_exists}. prepare_upsert(HostType, Name, Table, InsertFields, Updates, UniqueKeyFields, IncrementalField) -> - InsertFieldsTransformed = transform_fields(HostType, InsertFields), - UpdatesTransformed = transform_fields(HostType, Updates), - UniqueKeyFieldsTransformed = transform_fields(HostType, UniqueKeyFields), - IncrementalFieldTransformed = transform_fields(HostType, IncrementalField), + InsertFieldsTransformed = format_fields_for_db(HostType, InsertFields), + UpdatesTransformed = format_fields_for_db(HostType, Updates), + UniqueKeyFieldsTransformed = format_fields_for_db(HostType, UniqueKeyFields), + IncrementalFieldTransformed = format_fields_for_db(HostType, IncrementalField), SQL = upsert_query(HostType, Table, InsertFieldsTransformed, UpdatesTransformed, UniqueKeyFieldsTransformed, IncrementalFieldTransformed), Query = iolist_to_binary(SQL), @@ -212,9 +212,9 @@ prepared_upsert_many_fields(RecordCount, InsertFields, Updates, _UniqueKeyFields UniqueKeyFields :: [binary()]) -> {ok, QueryName :: mongoose_rdbms:query_name()} | {error, already_exists}. 
prepare_upsert_many(HostType, RecordCount, Name, Table, InsertFields, Updates, UniqueKeyFields) -> - InsertFieldsTransformed = transform_fields(HostType, InsertFields), - UpdatesTransformed = transform_fields(HostType, Updates), - UniqueKeyFieldsTransformed = transform_fields(HostType, UniqueKeyFields), + InsertFieldsTransformed = format_fields_for_db(HostType, InsertFields), + UpdatesTransformed = format_fields_for_db(HostType, Updates), + UniqueKeyFieldsTransformed = format_fields_for_db(HostType, UniqueKeyFields), SQL = upsert_query_many(HostType, RecordCount, Table, InsertFieldsTransformed, UpdatesTransformed, UniqueKeyFieldsTransformed), Query = iolist_to_binary(SQL), @@ -469,16 +469,16 @@ limit_offset_args(Limit, Offset) -> limit_offset_args(mssql, Limit, Offset) -> [Offset, Limit]; limit_offset_args(_, Limit, Offset) -> [Limit, Offset]. -transform_fields(_, none) -> +format_fields_for_db(_, none) -> none; -transform_fields(HostType, Fields) when is_list(Fields) -> +format_fields_for_db(HostType, Fields) when is_list(Fields) -> case mongoose_rdbms:db_engine(HostType) of cockroachdb -> lists:map(fun(Element) -> transform_field(Element) end, Fields); _ -> Fields end; -transform_fields(HostType, Field) when is_binary(Field) -> +format_fields_for_db(HostType, Field) when is_binary(Field) -> case mongoose_rdbms:db_engine(HostType) of cockroachdb -> transform_field(Field); @@ -489,6 +489,4 @@ transform_fields(HostType, Field) when is_binary(Field) -> transform_field({_, Field, _} = Element) -> erlang:setelement(2, Element, transform_field(Field)); transform_field(Field) when is_binary(Field)-> - <<"\"", Field/binary, "\"">>; -transform_field(Element) -> - Element. + <<"\"", Field/binary, "\"">>.